hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14aff0f5078b8248cb7469fa7cbfb6729d443577 | 166 | py | Python | data_structures/queue/conftest.py | seattlechem/data-structures-and-algorithms | 376e465c0a5529ea7c5c4e972a9852b6340251ff | [
"MIT"
] | null | null | null | data_structures/queue/conftest.py | seattlechem/data-structures-and-algorithms | 376e465c0a5529ea7c5c4e972a9852b6340251ff | [
"MIT"
] | null | null | null | data_structures/queue/conftest.py | seattlechem/data-structures-and-algorithms | 376e465c0a5529ea7c5c4e972a9852b6340251ff | [
"MIT"
] | null | null | null | from .queue import Queue
import pytest
@pytest.fixture
def empty_queue():
return Queue()
@pytest.fixture
def small_queue():
return Queue([1, 2, 3, 4, 5])
| 12.769231 | 33 | 0.680723 | 25 | 166 | 4.44 | 0.56 | 0.198198 | 0.288288 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037313 | 0.192771 | 166 | 12 | 34 | 13.833333 | 0.791045 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | true | 0 | 0.25 | 0.25 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
14b24d423b8c0fb79ec5771cce962910f4aa3a82 | 2,013 | py | Python | barmat/volume_integrals.py | FaustinCarter/barmat | 753725cda1c807afe4eacb385f1ab794579c100c | [
"MIT"
] | 4 | 2018-04-23T20:54:01.000Z | 2021-06-10T17:22:02.000Z | barmat/volume_integrals.py | FaustinCarter/barmat | 753725cda1c807afe4eacb385f1ab794579c100c | [
"MIT"
] | 3 | 2017-03-23T16:13:18.000Z | 2021-06-10T17:03:55.000Z | barmat/volume_integrals.py | FaustinCarter/barmat | 753725cda1c807afe4eacb385f1ab794579c100c | [
"MIT"
] | 1 | 2021-06-10T17:23:09.000Z | 2021-06-10T17:23:09.000Z | # coding=utf-8
from __future__ import division
import math as ma
import numba
@numba.jit("float64(float64, float64, float64)", nopython=True)
def intR(a, b, x):
r"""Calculate the R integral from Popel divided by x, (x = q*L0, L0 = zero-temp London depth).
Parameters
----------
a : float
b : float
x : float
Returns
-------
r : float
The R integral from Popel, divided by x.
Note
----
See R. Pöpel (1989), doi: 10.1063/1.343622 for more details."""
z2 = a**2+b**2
if x == 0:
r = b/(3.0*z2) #This is really r/x
#for small x
elif x < 0.01*ma.sqrt(z2):
r = b/(3.0*z2) #This is really r/x
#for large x
elif x > 100*ma.sqrt(z2):
r = (ma.pi*(1+(b**2-a**2)/x**2)/4-b/x)/x #This is really r/x
#in between x
else:
#calculate all the terms of r
r = (1/x**2)*(-0.5*b*x+0.25*a*b*ma.log((z2+x**2+2*a*x)/(z2+x**2-2*a*x))+
0.25*(x**2+b**2-a**2)*ma.atan2(2*b*x,(z2-x**2)))/x #This is really r/x
return r
@numba.jit("float64(float64, float64, float64)", nopython=True)
def intS(a, b, x):
r"""Calculate the R integral from Popel divided by x, (x = q*L0, L0 = zero-temp London depth).
Parameters
----------
a : float
b : float
x : float
Returns
-------
s : float
The S integral from Popel, divided by x.
Note
----
See R. Pöpel (1989), doi: 10.1063/1.343622 for more details."""
z2 = a**2+b**2
if x == 0:
s = a/(3.0*z2) #This is really s/x
#for small x
elif x < 0.01*ma.sqrt(z2):
s = a/(3.0*z2) #This is really s/x
#for large x
elif x > 100*ma.sqrt(z2):
s = (a/x - a*b*ma.pi/(2*x**2))/x #This is really s/x
#in between x
else:
#calculate all the terms of s
s = (1/x**2)*(0.5*(a*x)+
0.125*(x**2+b**2-a**2)*ma.log((z2+x**2+2*a*x)/(z2+x**2-2*a*x))-
0.5*b*a*ma.atan2(2*b*x,(z2-x**2)))/x #This is really s/x
return s
| 22.120879 | 98 | 0.513661 | 386 | 2,013 | 2.668394 | 0.202073 | 0.023301 | 0.093204 | 0.093204 | 0.862136 | 0.850485 | 0.834951 | 0.823301 | 0.801942 | 0.702913 | 0 | 0.100488 | 0.288127 | 2,013 | 90 | 99 | 22.366667 | 0.618283 | 0.42772 | 0 | 0.5 | 0 | 0 | 0.066084 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.09375 | 0 | 0.21875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
14b7ea420a3998a704d99ff49d3edee44f4da77f | 18,789 | py | Python | rabbitmqalert/tests/test_argumentsparser.py | mdcatapult/rabbitmq-alert | cf0f83942084b471129010f3069089d58b2ac314 | [
"BSD-3-Clause"
] | 72 | 2016-06-02T13:27:33.000Z | 2022-02-23T09:06:34.000Z | rabbitmqalert/tests/test_argumentsparser.py | mdcatapult/rabbitmq-alert | cf0f83942084b471129010f3069089d58b2ac314 | [
"BSD-3-Clause"
] | 24 | 2015-12-07T17:34:28.000Z | 2022-01-25T05:46:59.000Z | rabbitmqalert/tests/test_argumentsparser.py | mdcatapult/rabbitmq-alert | cf0f83942084b471129010f3069089d58b2ac314 | [
"BSD-3-Clause"
] | 35 | 2016-06-29T10:14:28.000Z | 2021-11-18T09:09:57.000Z | #! /usr/bin/python2
# -*- coding: utf8 -*-
from collections import namedtuple
import mock
import unittest
from rabbitmqalert import argumentsparser
from rabbitmqalert.models import argument
from rabbitmqalert import rabbitmqalert
class ArgumentsParserTestCase(unittest.TestCase):
def setUp(self):
argumentsparser.os_real = argumentsparser.os
argumentsparser.apiclient.ApiClient_real = argumentsparser.apiclient.ApiClient
argumentsparser.argument.Argument_real = argumentsparser.argument.Argument
argumentsparser.argument.Argument.files_have_group_real = argumentsparser.argument.Argument.files_have_group
argumentsparser.argument.ConfigParser.ConfigParser_real = argumentsparser.argument.ConfigParser.ConfigParser
argumentsparser.argument.os_real = argumentsparser.argument.os
rabbitmqalert.argparse_real = rabbitmqalert.argparse
def tearDown(self):
argumentsparser.os = argumentsparser.os_real
argumentsparser.apiclient.ApiClient = argumentsparser.apiclient.ApiClient_real
argumentsparser.argument.Argument = argumentsparser.argument.Argument_real
argumentsparser.argument.Argument.files_have_group = argumentsparser.argument.Argument.files_have_group_real
argumentsparser.argument.ConfigParser.ConfigParser = argumentsparser.argument.ConfigParser.ConfigParser_real
argumentsparser.argument.os = argumentsparser.argument.os_real
rabbitmqalert.argparse = rabbitmqalert.argparse_real
def test_parse_calls_get_value_for_every_group_argument(self):
logger = mock.MagicMock()
# setup the argparse argument parser with fake cli arguments
rabbitmqalert.argparse._sys.argv = ['rabbitmqalert.py'] + self.arguments_dict_to_list(self.construct_arguments())
argparse_parser = rabbitmqalert.setup_arguments()
argumentsparser.argument.Argument = mock.MagicMock()
argumentsparser.apiclient.ApiClient.get_queues = mock.MagicMock()
parser = argumentsparser.ArgumentsParser(logger)
parser.validate = mock.MagicMock()
parser.format_conditions = mock.MagicMock()
parser.parse(argparse_parser)
# count the number of arguments
group_arguments_count = 0
for group in argparse_parser._action_groups:
for group_argument in group._group_actions:
group_arguments_count += 1
argumentsparser.argument.Argument.get_value.call_count == group_arguments_count
def test_parse_returns_discovered_queues_when_argument_set(self):
logger = mock.MagicMock()
# edit the cli arguments to look like queues discovery was requested
arguments = self.construct_arguments()
arguments["--queues-discovery"] = True
arguments_list = self.arguments_dict_to_list(arguments)
# setup the argparse argument parser with fake cli arguments
rabbitmqalert.argparse._sys.argv = ['rabbitmqalert.py'] + arguments_list
argparse_parser = rabbitmqalert.setup_arguments()
argumentsparser.apiclient.ApiClient.get_queues = mock.MagicMock(return_value=["foo-queue", "bar-queue"])
parser = argumentsparser.ArgumentsParser(logger)
parser.validate = mock.MagicMock()
parser.format_conditions = mock.MagicMock()
parser.parse(argparse_parser)
# create a copy of the arguments in the form they would look like after parsing them (behore calling validate)
arguments_dict = vars(argparse_parser.parse_args(arguments_list))
arguments_dict["server_queues"] = ["foo-queue", "bar-queue"]
arguments_dict["email_to"] = arguments_dict["email_to"].split(",")
arguments_dict["help"] = None
arguments_dict["queue_conditions"] = dict()
arguments_dict["email_ssl"] = False
argumentsparser.apiclient.ApiClient.get_queues.assert_called_once()
parser.validate.assert_called_once_with(arguments_dict)
def test_parse_returns_emails_split(self):
logger = mock.MagicMock()
# edit the cli arguments to look like multiple email address were given
arguments = self.construct_arguments()
arguments["--email-to"] = "foo-email-to,bar-email-to"
arguments_list = self.arguments_dict_to_list(arguments)
# setup the argparse argument parser with fake cli arguments
rabbitmqalert.argparse._sys.argv = ['rabbitmqalert.py'] + arguments_list
argparse_parser = rabbitmqalert.setup_arguments()
parser = argumentsparser.ArgumentsParser(logger)
parser.validate = mock.MagicMock()
parser.format_conditions = mock.MagicMock()
parser.parse(argparse_parser)
# create a copy of the arguments in the form they would look like after parsing them (behore calling validate)
arguments_dict = vars(argparse_parser.parse_args(arguments_list))
arguments_dict["server_queues"] = arguments_dict["server_queues"].split(",")
arguments_dict["email_to"] = arguments_dict["email_to"].split(",")
arguments_dict["help"] = None
arguments_dict["queue_conditions"] = dict()
arguments_dict["server_queues_discovery"] = False
arguments_dict["email_ssl"] = False
parser.validate.assert_called_once_with(arguments_dict)
def test_parse_skips_queue_conditions_when_non_standard_groups_do_not_exist(self):
logger = mock.MagicMock()
argumentsparser.argument.Argument.files_have_group = mock.MagicMock(return_value=False)
arguments_list = self.arguments_dict_to_list(self.construct_arguments())
# setup the argparse argument parser with fake cli arguments
rabbitmqalert.argparse._sys.argv = ['rabbitmqalert.py'] + arguments_list
argparse_parser = rabbitmqalert.setup_arguments()
parser = argumentsparser.ArgumentsParser(logger)
parser.validate = mock.MagicMock()
parser.format_conditions = mock.MagicMock()
parser.parse(argparse_parser)
# create a copy of the arguments in the form they would look like after parsing them (behore calling validate)
arguments_dict = vars(argparse_parser.parse_args(arguments_list))
arguments_dict["server_queues"] = arguments_dict["server_queues"].split(",")
arguments_dict["email_to"] = arguments_dict["email_to"].split(",")
arguments_dict["help"] = None
arguments_dict["queue_conditions"] = dict()
arguments_dict["server_queues_discovery"] = False
arguments_dict["email_ssl"] = False
# checks for non-standard group for queue specific conditions
argumentsparser.argument.Argument.files_have_group.assert_called_once_with("Conditions:foo-queue")
# validate called with empty queue_conditions
parser.validate.assert_called_once_with(arguments_dict)
def test_parse_constructs_queue_conditions_when_non_standard_groups_exist(self):
logger = mock.MagicMock()
argumentsparser.argument.Argument.files_have_group = mock.MagicMock(return_value=True)
arguments_dict = self.construct_arguments()
arguments_dict["--queues"] = "foo-queue,bar-queue"
arguments_list = self.arguments_dict_to_list(arguments_dict)
# setup the argparse argument parser with fake cli arguments
rabbitmqalert.argparse._sys.argv = ['rabbitmqalert.py'] + arguments_list
argparse_parser = rabbitmqalert.setup_arguments()
parser = argumentsparser.ArgumentsParser(logger)
parser.validate = mock.MagicMock()
parser.format_conditions = mock.MagicMock()
parser.parse(argparse_parser)
# create a copy of the arguments in the form they would look like after parsing them (behore calling validate)
arguments_dict = vars(argparse_parser.parse_args(arguments_list))
arguments_dict["server_queues"] = arguments_dict["server_queues"].split(",")
arguments_dict["email_to"] = arguments_dict["email_to"].split(",")
arguments_dict["help"] = None
arguments_dict["server_queues_discovery"] = False
arguments_dict["email_ssl"] = False
arguments_dict["queue_conditions"] = {
"foo-queue": {
"conditions_total_queue_size": 40,
"conditions_ready_queue_size": 20,
"conditions_queue_consumers_connected": 52,
"conditions_unack_queue_size": 30
},
"bar-queue": {
"conditions_total_queue_size": 40,
"conditions_ready_queue_size": 20,
"conditions_queue_consumers_connected": 52,
"conditions_unack_queue_size": 30
}
}
# checks for non-standard group for queue specific conditions
argumentsparser.argument.Argument.files_have_group.assert_any_call("Conditions:foo-queue")
argumentsparser.argument.Argument.files_have_group.assert_any_call("Conditions:bar-queue")
# validate called with empty queue_conditions
parser.validate.assert_called_once_with(arguments_dict)
def test_parse_returns_merged_arguments_and_conditions(self):
logger = mock.MagicMock()
arguments = self.construct_arguments()
arguments_list = self.arguments_dict_to_list(arguments)
# setup the argparse argument parser with fake cli arguments
rabbitmqalert.argparse._sys.argv = ['rabbitmqalert.py'] + arguments_list
argparse_parser = rabbitmqalert.setup_arguments()
parser = argumentsparser.ArgumentsParser(logger)
result = parser.parse(argparse_parser)
# create a copy of the arguments in the form they would look like after parsing them
arguments_dict = vars(argparse_parser.parse_args(arguments_list))
arguments_dict["server_queues"] = arguments_dict["server_queues"].split(",")
arguments_dict["email_to"] = arguments_dict["email_to"].split(",")
arguments_dict["help"] = None
arguments_dict["server_queues_discovery"] = False
arguments_dict["email_ssl"] = False
arguments_dict["queue_conditions"] = dict()
arguments_dict = dict(arguments_dict.items() + parser.format_conditions(arguments_dict).items())
self.assertEquals(arguments_dict, result)
def test_validate_exits_when_required_argument_is_missing(self):
logger = mock.MagicMock()
arguments = self.construct_arguments()
del arguments["--host"]
arguments_list = self.arguments_dict_to_list(arguments)
rabbitmqalert.argparse._sys.argv = ['rabbitmqalert.py'] + arguments_list
argparse_parser = rabbitmqalert.setup_arguments()
parser = argumentsparser.ArgumentsParser(logger)
arguments_dict = vars(argparse_parser.parse_args(arguments_list))
with self.assertRaises(SystemExit) as ex:
parser.validate(arguments_dict)
self.assertEqual(ex.exception.code, 1)
logger.error.assert_called_once_with("Required argument not defined: host")
def test_validate_does_not_exit_when_all_required_arguments_exist(self):
logger = mock.MagicMock()
arguments = self.construct_arguments()
arguments_list = self.arguments_dict_to_list(arguments)
rabbitmqalert.argparse._sys.argv = ['rabbitmqalert.py'] + arguments_list
argparse_parser = rabbitmqalert.setup_arguments()
parser = argumentsparser.ArgumentsParser(logger)
arguments_dict = vars(argparse_parser.parse_args(arguments_list))
parser.validate(arguments_dict)
logger.error.assert_not_called()
def test_format_conditions_returns_generic_and_queue_conditions(self):
logger = mock.MagicMock()
arguments = self.construct_arguments()
arguments_list = self.arguments_dict_to_list(arguments)
# setup the argparse argument parser with fake cli arguments
rabbitmqalert.argparse._sys.argv = ['rabbitmqalert.py'] + arguments_list
argparse_parser = rabbitmqalert.setup_arguments()
arguments_dict = vars(argparse_parser.parse_args(arguments_list))
arguments_dict["server_queues"] = ["foo-queue"]
arguments_dict["email_to"] = arguments_dict["email_to"].split(",")
arguments_dict["help"] = None
arguments_dict["queue_conditions"] = dict()
parser = argumentsparser.ArgumentsParser(logger)
results = parser.format_conditions(arguments_dict)
self.assertTrue("conditions" in results)
self.assertTrue("generic_conditions" in results)
# generic conditions
self.assertEquals(arguments_dict["conditions_consumers_connected"], results["generic_conditions"]["conditions_consumers_connected"])
self.assertEquals(arguments_dict["conditions_open_connections"], results["generic_conditions"]["conditions_open_connections"])
self.assertEquals(arguments_dict["conditions_nodes_running"], results["generic_conditions"]["conditions_nodes_running"])
self.assertEquals(arguments_dict["conditions_node_memory_used"], results["generic_conditions"]["conditions_node_memory_used"])
# queue conditions
self.assertEquals(arguments_dict["conditions_ready_queue_size"], results["conditions"]["foo-queue"]["conditions_ready_queue_size"])
self.assertEquals(arguments_dict["conditions_unack_queue_size"], results["conditions"]["foo-queue"]["conditions_unack_queue_size"])
self.assertEquals(arguments_dict["conditions_total_queue_size"], results["conditions"]["foo-queue"]["conditions_total_queue_size"])
self.assertEquals(arguments_dict["conditions_queue_consumers_connected"], results["conditions"]["foo-queue"]["conditions_queue_consumers_connected"])
def test_format_conditions_returns_queue_conditions_when_exist(self):
logger = mock.MagicMock()
arguments = self.construct_arguments()
arguments_list = self.arguments_dict_to_list(arguments)
# setup the argparse argument parser with fake cli arguments
rabbitmqalert.argparse._sys.argv = ['rabbitmqalert.py'] + arguments_list
argparse_parser = rabbitmqalert.setup_arguments()
arguments_dict = vars(argparse_parser.parse_args(arguments_list))
arguments_dict["server_queues"] = ["foo-queue", "bar-queue"]
arguments_dict["email_to"] = arguments_dict["email_to"].split(",")
arguments_dict["help"] = None
arguments_dict["queue_conditions"] = {
"foo-queue": {
"conditions_total_queue_size": 40,
"conditions_ready_queue_size": 20,
"conditions_queue_consumers_connected": 52,
"conditions_unack_queue_size": 30
},
"bar-queue": {
"conditions_total_queue_size": 40,
"conditions_ready_queue_size": 20,
"conditions_queue_consumers_connected": 52,
"conditions_unack_queue_size": 30
}
}
parser = argumentsparser.ArgumentsParser(logger)
results = parser.format_conditions(arguments_dict)
self.assertTrue("conditions" in results)
self.assertTrue("generic_conditions" in results)
# generic conditions
self.assertEquals(arguments_dict["conditions_consumers_connected"], results["generic_conditions"]["conditions_consumers_connected"])
self.assertEquals(arguments_dict["conditions_open_connections"], results["generic_conditions"]["conditions_open_connections"])
self.assertEquals(arguments_dict["conditions_nodes_running"], results["generic_conditions"]["conditions_nodes_running"])
self.assertEquals(arguments_dict["conditions_node_memory_used"], results["generic_conditions"]["conditions_node_memory_used"])
# queue conditions
self.assertTrue(arguments_dict["conditions_ready_queue_size"], results["conditions"]["foo-queue"]["conditions_ready_queue_size"])
self.assertEquals(arguments_dict["conditions_unack_queue_size"], results["conditions"]["foo-queue"]["conditions_unack_queue_size"])
self.assertEquals(arguments_dict["conditions_total_queue_size"], results["conditions"]["foo-queue"]["conditions_total_queue_size"])
self.assertEquals(arguments_dict["conditions_queue_consumers_connected"], results["conditions"]["foo-queue"]["conditions_queue_consumers_connected"])
self.assertTrue(arguments_dict["conditions_ready_queue_size"], results["conditions"]["bar-queue"]["conditions_ready_queue_size"])
self.assertEquals(arguments_dict["conditions_unack_queue_size"], results["conditions"]["bar-queue"]["conditions_unack_queue_size"])
self.assertEquals(arguments_dict["conditions_total_queue_size"], results["conditions"]["bar-queue"]["conditions_total_queue_size"])
self.assertEquals(arguments_dict["conditions_queue_consumers_connected"], results["conditions"]["bar-queue"]["conditions_queue_consumers_connected"])
@staticmethod
def construct_arguments():
return {
"--config-file": None,
"--scheme": "foo-scheme",
"--host": "foo-host",
"--port": "foo-port",
"--host-alias": "bar-host",
"--username": "foo-username",
"--password": "foo-password",
"--vhost": "foo-vhost",
"--queues": "foo-queue",
"--queues-discovery": False,
"--check-rate": "10",
"--ready-queue-size": "20",
"--unacknowledged-queue-size": "30",
"--total-queue-size": "40",
"--queue-consumers-connected": "52",
"--consumers-connected": "50",
"--open-connections": "51",
"--nodes-running": "60",
"--node-memory-used": "70",
"--email-to": "foo-email-to",
"--email-from": "foo-email-from",
"--email-subject": "foo-email-subject",
"--email-server": "foo-email-server",
"--email-password": "foo-email-password",
"--email-ssl": False,
"--slack-url": "foo-slack-url",
"--slack-channel": "foo-slack-channel",
"--slack-username": "foo-slack-username",
"--telegram-bot-id": "foo-telegram-bot-id",
"--telegram-channel": "foo-telegram-channel"
}
@staticmethod
def arguments_dict_to_list(dict):
result = []
for key, value in dict.iteritems():
if value not in [False, None]:
result.append(key)
# arguments of store_true or store_false action must not have a value
result.append(value) if type(value) is not bool else None
return result
if __name__ == "__main__":
unittest.main()
| 48.425258 | 157 | 0.700357 | 2,004 | 18,789 | 6.266467 | 0.099301 | 0.106625 | 0.03663 | 0.043876 | 0.810559 | 0.775123 | 0.762223 | 0.72424 | 0.709906 | 0.690954 | 0 | 0.003645 | 0.196924 | 18,789 | 387 | 158 | 48.550388 | 0.828617 | 0.082229 | 0 | 0.579137 | 0 | 0 | 0.215252 | 0.104199 | 0 | 0 | 0 | 0 | 0.133094 | 1 | 0.05036 | false | 0.007194 | 0.021583 | 0.003597 | 0.082734 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
210d97708e948defc016011d23120873587d7f47 | 28 | py | Python | segmentation_models_pytorch/transunet/__init__.py | PhilippMarquardt/segmentation_models.pytorch | 8a884bdf7a0c92a2eb4f5d85120a83cd13b08a06 | [
"MIT"
] | null | null | null | segmentation_models_pytorch/transunet/__init__.py | PhilippMarquardt/segmentation_models.pytorch | 8a884bdf7a0c92a2eb4f5d85120a83cd13b08a06 | [
"MIT"
] | null | null | null | segmentation_models_pytorch/transunet/__init__.py | PhilippMarquardt/segmentation_models.pytorch | 8a884bdf7a0c92a2eb4f5d85120a83cd13b08a06 | [
"MIT"
] | null | null | null | from .model import TransUnet | 28 | 28 | 0.857143 | 4 | 28 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107143 | 28 | 1 | 28 | 28 | 0.96 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
21148063577ce42922990fc414643ad210d8f4a4 | 95 | py | Python | Hello1.py | jkirkish/DojoAssignments | 3eb6e08132977af9d25449254a2caeb40f53c394 | [
"Adobe-Glyph",
"FSFAP"
] | null | null | null | Hello1.py | jkirkish/DojoAssignments | 3eb6e08132977af9d25449254a2caeb40f53c394 | [
"Adobe-Glyph",
"FSFAP"
] | null | null | null | Hello1.py | jkirkish/DojoAssignments | 3eb6e08132977af9d25449254a2caeb40f53c394 | [
"Adobe-Glyph",
"FSFAP"
] | null | null | null | name = "Jelly"
name = "Hello"
print (5+5)
print ("Joseph",name)
print("5",5)
print("Me")
| 13.571429 | 22 | 0.568421 | 15 | 95 | 3.6 | 0.466667 | 0.222222 | 0.259259 | 0.444444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.051948 | 0.189474 | 95 | 6 | 23 | 15.833333 | 0.649351 | 0 | 0 | 0 | 0 | 0 | 0.213483 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.666667 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
dcd90aa7dd1df367e933dd00d51dd35eca42c005 | 64 | py | Python | onix/views/__init__.py | jesuejunior/stone | e55d3d9a555a1bd0f8655b7684652187cd1f5d4b | [
"BSD-3-Clause"
] | 3 | 2016-06-16T22:47:42.000Z | 2019-10-13T15:29:16.000Z | onix/views/__init__.py | jesuejunior/stone | e55d3d9a555a1bd0f8655b7684652187cd1f5d4b | [
"BSD-3-Clause"
] | 1 | 2021-06-10T18:22:23.000Z | 2021-06-10T18:22:23.000Z | onix/views/__init__.py | jesuejunior/stone | e55d3d9a555a1bd0f8655b7684652187cd1f5d4b | [
"BSD-3-Clause"
] | null | null | null | from .home import *
from .block import *
from .material import * | 21.333333 | 23 | 0.734375 | 9 | 64 | 5.222222 | 0.555556 | 0.425532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.171875 | 64 | 3 | 23 | 21.333333 | 0.886792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
dcf18a2399e4f4cc092b7775baf685b002a4f9e4 | 69 | py | Python | cg_openmm/__init__.py | garrettameek/cg_openmm | 5ea52c9e6e2990953bdbcfa14f4e61a7d7efae7c | [
"MIT"
] | null | null | null | cg_openmm/__init__.py | garrettameek/cg_openmm | 5ea52c9e6e2990953bdbcfa14f4e61a7d7efae7c | [
"MIT"
] | null | null | null | cg_openmm/__init__.py | garrettameek/cg_openmm | 5ea52c9e6e2990953bdbcfa14f4e61a7d7efae7c | [
"MIT"
] | null | null | null | from . import build
from . import simulation
from . import utilities
| 17.25 | 24 | 0.782609 | 9 | 69 | 6 | 0.555556 | 0.555556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 69 | 3 | 25 | 23 | 0.947368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0d2781e79e44af2b0967bd184d59ce1c4089c670 | 189 | py | Python | technologies/app/jupyter/jupyter-base/tests/python3_lib_test.py | EtienneSIG/technologies | b143d814c3500c545a508e1965a7560e6aed90e6 | [
"Apache-2.0"
] | null | null | null | technologies/app/jupyter/jupyter-base/tests/python3_lib_test.py | EtienneSIG/technologies | b143d814c3500c545a508e1965a7560e6aed90e6 | [
"Apache-2.0"
] | null | null | null | technologies/app/jupyter/jupyter-base/tests/python3_lib_test.py | EtienneSIG/technologies | b143d814c3500c545a508e1965a7560e6aed90e6 | [
"Apache-2.0"
] | null | null | null | # Manual tests ...
import sys
print(sys.executable)
print(sys.version)
print(sys.version_info)
### FIXME find a way to test those installs
# - from hdfs.hfile import Hfile
# - import hdf5
| 18.9 | 43 | 0.73545 | 29 | 189 | 4.758621 | 0.689655 | 0.173913 | 0.217391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00625 | 0.153439 | 189 | 9 | 44 | 21 | 0.85625 | 0.534392 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 0 | 1 | 0 | true | 0 | 0.25 | 0 | 0.25 | 0.75 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
0d35b5cc887897bb17dde3020cb668d62c69864c | 31 | py | Python | test_macro/fors/__init__.py | kerryeon/test-macro | a65f12d7f6f1a679070e974f2abacfed7634c2c6 | [
"MIT"
] | null | null | null | test_macro/fors/__init__.py | kerryeon/test-macro | a65f12d7f6f1a679070e974f2abacfed7634c2c6 | [
"MIT"
] | null | null | null | test_macro/fors/__init__.py | kerryeon/test-macro | a65f12d7f6f1a679070e974f2abacfed7634c2c6 | [
"MIT"
] | null | null | null | from .recorder import Recorder
| 15.5 | 30 | 0.83871 | 4 | 31 | 6.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.129032 | 31 | 1 | 31 | 31 | 0.962963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
b4aab581710d19724934ed8e7167357503e062b3 | 4,334 | py | Python | resources/dot_PyCharm/system/python_stubs/-762174762/PySide/QtGui/QGraphicsGridLayout.py | basepipe/developer_onboarding | 05b6a776f8974c89517868131b201f11c6c2a5ad | [
"MIT"
] | 1 | 2020-04-20T02:27:20.000Z | 2020-04-20T02:27:20.000Z | resources/dot_PyCharm/system/python_stubs/cache/8cdc475d469a13122bc4bc6c3ac1c215d93d5f120f5cc1ef33a8f3088ee54d8e/PySide/QtGui/QGraphicsGridLayout.py | basepipe/developer_onboarding | 05b6a776f8974c89517868131b201f11c6c2a5ad | [
"MIT"
] | null | null | null | resources/dot_PyCharm/system/python_stubs/cache/8cdc475d469a13122bc4bc6c3ac1c215d93d5f120f5cc1ef33a8f3088ee54d8e/PySide/QtGui/QGraphicsGridLayout.py | basepipe/developer_onboarding | 05b6a776f8974c89517868131b201f11c6c2a5ad | [
"MIT"
] | null | null | null | # encoding: utf-8
# module PySide.QtGui
# from C:\Python27\lib\site-packages\PySide\QtGui.pyd
# by generator 1.147
# no doc
# imports
import PySide.QtCore as __PySide_QtCore
import Shiboken as __Shiboken
from QGraphicsLayout import QGraphicsLayout
class QGraphicsGridLayout(QGraphicsLayout):
# no doc
def addItem(self, *args, **kwargs): # real signature unknown
pass
def alignment(self, *args, **kwargs): # real signature unknown
pass
def columnAlignment(self, *args, **kwargs): # real signature unknown
pass
def columnCount(self, *args, **kwargs): # real signature unknown
pass
def columnMaximumWidth(self, *args, **kwargs): # real signature unknown
pass
def columnMinimumWidth(self, *args, **kwargs): # real signature unknown
pass
def columnPreferredWidth(self, *args, **kwargs): # real signature unknown
pass
def columnSpacing(self, *args, **kwargs): # real signature unknown
pass
def columnStretchFactor(self, *args, **kwargs): # real signature unknown
pass
def count(self, *args, **kwargs): # real signature unknown
pass
def horizontalSpacing(self, *args, **kwargs): # real signature unknown
pass
def invalidate(self, *args, **kwargs): # real signature unknown
pass
def itemAt(self, *args, **kwargs): # real signature unknown
pass
def removeAt(self, *args, **kwargs): # real signature unknown
pass
def removeItem(self, *args, **kwargs): # real signature unknown
pass
def rowAlignment(self, *args, **kwargs): # real signature unknown
pass
def rowCount(self, *args, **kwargs): # real signature unknown
pass
def rowMaximumHeight(self, *args, **kwargs): # real signature unknown
pass
def rowMinimumHeight(self, *args, **kwargs): # real signature unknown
pass
def rowPreferredHeight(self, *args, **kwargs): # real signature unknown
pass
def rowSpacing(self, *args, **kwargs): # real signature unknown
pass
def rowStretchFactor(self, *args, **kwargs): # real signature unknown
pass
def setAlignment(self, *args, **kwargs): # real signature unknown
pass
def setColumnAlignment(self, *args, **kwargs): # real signature unknown
pass
def setColumnFixedWidth(self, *args, **kwargs): # real signature unknown
pass
def setColumnMaximumWidth(self, *args, **kwargs): # real signature unknown
pass
def setColumnMinimumWidth(self, *args, **kwargs): # real signature unknown
pass
def setColumnPreferredWidth(self, *args, **kwargs): # real signature unknown
pass
def setColumnSpacing(self, *args, **kwargs): # real signature unknown
pass
def setColumnStretchFactor(self, *args, **kwargs): # real signature unknown
pass
def setGeometry(self, *args, **kwargs): # real signature unknown
pass
def setHorizontalSpacing(self, *args, **kwargs): # real signature unknown
pass
def setRowAlignment(self, *args, **kwargs): # real signature unknown
pass
def setRowFixedHeight(self, *args, **kwargs): # real signature unknown
pass
def setRowMaximumHeight(self, *args, **kwargs): # real signature unknown
pass
def setRowMinimumHeight(self, *args, **kwargs): # real signature unknown
pass
def setRowPreferredHeight(self, *args, **kwargs): # real signature unknown
pass
def setRowSpacing(self, *args, **kwargs): # real signature unknown
pass
def setRowStretchFactor(self, *args, **kwargs): # real signature unknown
pass
def setSpacing(self, *args, **kwargs): # real signature unknown
pass
def setVerticalSpacing(self, *args, **kwargs): # real signature unknown
pass
def sizeHint(self, *args, **kwargs): # real signature unknown
pass
def verticalSpacing(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
| 28.142857 | 80 | 0.6509 | 466 | 4,334 | 6 | 0.199571 | 0.209227 | 0.321888 | 0.283262 | 0.644134 | 0.644134 | 0.644134 | 0.630544 | 0 | 0 | 0 | 0.002144 | 0.246654 | 4,334 | 153 | 81 | 28.326797 | 0.854211 | 0.293493 | 0 | 0.473684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.473684 | false | 0.473684 | 0.031579 | 0 | 0.515789 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 6 |
b4ee9baafc61f7aeebcb34aa9f83f7509b1347a8 | 82 | py | Python | TermTk/TTkTestWidgets/__init__.py | UltraStudioLTD/pyTermTk | a1e96b0e7f43906b9fda0b16f19f427919a055c2 | [
"MIT"
] | 1 | 2022-02-28T16:33:25.000Z | 2022-02-28T16:33:25.000Z | TermTk/TTkTestWidgets/__init__.py | UltraStudioLTD/pyTermTk | a1e96b0e7f43906b9fda0b16f19f427919a055c2 | [
"MIT"
] | null | null | null | TermTk/TTkTestWidgets/__init__.py | UltraStudioLTD/pyTermTk | a1e96b0e7f43906b9fda0b16f19f427919a055c2 | [
"MIT"
] | null | null | null | from .logviewer import *
from .testwidget import *
from .testwidgetsizes import *
| 20.5 | 30 | 0.780488 | 9 | 82 | 7.111111 | 0.555556 | 0.3125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.146341 | 82 | 3 | 31 | 27.333333 | 0.914286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
b4fdd6858ddb174637892f661e3766ac5c8a0236 | 188 | py | Python | classification.py | oushu1zhangxiangxuan1/learn-tensorflow | e83f8633fcbfd428ee3495b18b75ca78c7a25331 | [
"Apache-2.0"
] | null | null | null | classification.py | oushu1zhangxiangxuan1/learn-tensorflow | e83f8633fcbfd428ee3495b18b75ca78c7a25331 | [
"Apache-2.0"
] | null | null | null | classification.py | oushu1zhangxiangxuan1/learn-tensorflow | e83f8633fcbfd428ee3495b18b75ca78c7a25331 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
from tensorlfow.examples.tutorials.mnist import input_data
xs = tf.placeholder(tf.float32, [None, 784])
ys = tf.placeholder(tf.float32, [None, 10])
prediction =
| 20.888889 | 58 | 0.760638 | 27 | 188 | 5.259259 | 0.703704 | 0.183099 | 0.211268 | 0.309859 | 0.366197 | 0 | 0 | 0 | 0 | 0 | 0 | 0.054545 | 0.12234 | 188 | 8 | 59 | 23.5 | 0.806061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.4 | null | null | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
37050b12c1f2e0d5211ff2641cce046d21598b08 | 240 | py | Python | trio_asyncio/__init__.py | ProvoK/trio-asyncio | 8098e93a63eedf7188545cbda45e54c0bcdd85fc | [
"Apache-2.0",
"MIT"
] | null | null | null | trio_asyncio/__init__.py | ProvoK/trio-asyncio | 8098e93a63eedf7188545cbda45e54c0bcdd85fc | [
"Apache-2.0",
"MIT"
] | null | null | null | trio_asyncio/__init__.py | ProvoK/trio-asyncio | 8098e93a63eedf7188545cbda45e54c0bcdd85fc | [
"Apache-2.0",
"MIT"
] | null | null | null | # This code implements basic asyncio compatibility
from ._version import __version__ # noqa
from .base import * # noqa
from .loop import * # noqa
from .util import * # noqa
from .async_ import * # noqa
from .adapter import * # noqa
| 24 | 50 | 0.7125 | 31 | 240 | 5.322581 | 0.483871 | 0.242424 | 0.339394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2125 | 240 | 9 | 51 | 26.666667 | 0.873016 | 0.325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2ea43309241365712afcb36edd2684632cfc7a85 | 8,051 | py | Python | algs14_hashtable/hashtable_linked_sorted.py | zhubaiyuan/learning-algorithms | ea9ee674878d535a9e9987c0d948c0357e0ed4da | [
"MIT"
] | null | null | null | algs14_hashtable/hashtable_linked_sorted.py | zhubaiyuan/learning-algorithms | ea9ee674878d535a9e9987c0d948c0357e0ed4da | [
"MIT"
] | null | null | null | algs14_hashtable/hashtable_linked_sorted.py | zhubaiyuan/learning-algorithms | ea9ee674878d535a9e9987c0d948c0357e0ed4da | [
"MIT"
] | null | null | null | from algs14_hashtable.entry import LinkedEntry
class SortedLinkedListsHashtable:
"""
Hashtable using array of M linked lists where keys appear in sorted order.
"""
def __init__(self, M=10):
self.table = [None] * M
if M < 1:
raise ValueError('Hashtable storage must be at least 1.')
self.M = M
self.N = 0
def __len__(self):
return self.N
def get(self, k):
"""
Retrieve value associated with key, k. STOP when entry is bigger than key.
"""
# First place it could be
hc = hash(k) % self.M
entry = self.table[hc]
while entry:
# Doesn't exist since keys in sorted order
if entry.key > k:
return None
if entry.key == k:
return entry.value
entry = entry.next
# Couldn't find
return None
def put(self, k, v):
"""
Associate value, v, with the key, k.
"""
# First place it could be
hc = hash(k) % self.M
entry = self.table[hc]
if entry is None:
self.N += 1
self.table[hc] = LinkedEntry(k, v, self.table[hc])
return
prev = None
while entry:
# Can insert since we didn't find
if entry.key > k:
self.N += 1
# new First
if prev is None:
self.table[hc] = LinkedEntry(k, v, entry)
else:
prev.next = LinkedEntry(k, v, entry)
return
# Overwrite if already here
if entry.key == k:
entry.value = v
return
prev, entry = entry, entry.next
# If we get here, key is largest among all, so append to end
prev.next = LinkedEntry(k, v)
self.N += 1
def remove(self, k):
"""
Remove (k,v) entry associated with k.
"""
# First place it could be
hc = hash(k) % self.M
entry = self.table[hc]
prev = None
while entry:
if entry.key == k:
if prev:
prev.next = entry.next
else:
self.table[hc] = entry.next
self.N -= 1
return entry.value
prev, entry = entry, entry.next
# Nothing was removed
return None
def __iter__(self):
"""
Generate all (k, v) tuples for entries in all linked lists table.
"""
for entry in self.table:
while entry:
yield (entry.key, entry.value)
entry = entry.next
def evaluate_hashtable_sorted_chains(output=True, decimals=4):
"""
Evaluate performance of separate chaining Hashtable with sorted entries.
"""
import timeit
from common.table import DataTable
print('Best Case Build Time')
tbl = DataTable([8, 20, 20, 20], ['M', 'Open Addressing',
'Separate Chaining', 'Sorted Chains'], output=output, decimals=decimals)
for size in [214129, 524287, 999983]:
timing_oa = min(timeit.repeat(stmt='''
ht = OpenHashtable({})
for w in reversed(words[:160564]):
ht.put(w,w)'''.format(size), setup='''
from algs14_hashtable.hashtable_open import OpenHashtable
from resources.english import english_words
words = english_words()''', repeat=7, number=5))/5
timing_sc = min(timeit.repeat(stmt='''
ht = LinkedHashtable({})
for w in reversed(words[:160564]):
ht.put(w,w)'''.format(size), setup='''
from algs14_hashtable.hashtable_linked import LinkedHashtable
from resources.english import english_words
words = english_words()''', repeat=7, number=5))/5
timing_sorted = min(timeit.repeat(stmt='''
ht = SortedLinkedListsHashtable({})
for w in reversed(words[:160564]):
ht.put(w,w)'''.format(size), setup='''
from algs14_hashtable.hashtable_linked_sorted import SortedLinkedListsHashtable
from resources.english import english_words
words = english_words()''', repeat=7, number=5))/5
tbl.row([size, timing_oa, timing_sc, timing_sorted])
print('Worst Case Build Time')
tbl = DataTable([8, 20, 20, 20], ['M', 'Open Addressing',
'Separate Chaining', 'Sorted Chains'], output=output, decimals=decimals)
for size in [214129, 524287, 999983]:
timing_oa = min(timeit.repeat(stmt='''
ht = OpenHashtable({})
for w in words[:160564]:
ht.put(w,w)'''.format(size), setup='''
from algs14_hashtable.hashtable_open import OpenHashtable
from resources.english import english_words
words = english_words()''', repeat=7, number=5))/5
timing_sc = min(timeit.repeat(stmt='''
ht = LinkedHashtable({})
for w in words[:160564]:
ht.put(w,w)'''.format(size), setup='''
from algs14_hashtable.hashtable_linked import LinkedHashtable
from resources.english import english_words
words = english_words()''', repeat=7, number=5))/5
timing_sorted = min(timeit.repeat(stmt='''
ht = SortedLinkedListsHashtable({})
for w in words[:160564]:
ht.put(w,w)'''.format(size), setup='''
from algs14_hashtable.hashtable_linked_sorted import SortedLinkedListsHashtable
from resources.english import english_words
words = english_words()''', repeat=7, number=5))/5
tbl.row([size, timing_oa, timing_sc, timing_sorted])
print('Search First Half')
tbl = DataTable([8, 20, 20, 20], ['M', 'Open Addressing',
'Separate Chaining', 'Sorted Chains'], output=output, decimals=decimals)
for size in [214129, 524287, 999983]:
search_oa = min(timeit.repeat(stmt='''
for w in words[:160564]:
ht.get(w)''', setup='''
from algs14_hashtable.hashtable_open import OpenHashtable
from resources.english import english_words
words = english_words()
ht = OpenHashtable({})
for w in words[:160564]:
ht.put(w,w)'''.format(size), repeat=7, number=5))/5
search_sc = min(timeit.repeat(stmt='''
for w in words[:160564]:
ht.get(w)''', setup='''
from algs14_hashtable.hashtable_linked import LinkedHashtable
from resources.english import english_words
words = english_words()
ht = LinkedHashtable({})
for w in words[:160564]:
ht.put(w,w)'''.format(size), repeat=7, number=5))/5
search_sorted = min(timeit.repeat(stmt='''
for w in words[:160564]:
ht.get(w)''', setup='''
from algs14_hashtable.hashtable_linked_sorted import SortedLinkedListsHashtable
from resources.english import english_words
words = english_words()
ht = SortedLinkedListsHashtable({})
for w in words[:160564]:
ht.put(w,w)'''.format(size), repeat=7, number=5))/5
tbl.row([size, search_oa, search_sc, search_sorted])
print('Search Back Half')
tbl = DataTable([8, 20, 20, 20], ['M', 'Open Addressing',
'Separate Chaining', 'Sorted Chains'], output=output, decimals=decimals)
for size in [214129, 524287, 999983]:
search_oa = min(timeit.repeat(stmt='''
for w in words[160564:]:
ht.get(w)''', setup='''
from algs14_hashtable.hashtable_open import OpenHashtable
from resources.english import english_words
words = english_words()
ht = OpenHashtable({})
for w in words[:160564]:
ht.put(w,w)'''.format(size), repeat=7, number=5))/5
search_sc = min(timeit.repeat(stmt='''
for w in words[160564:]:
ht.get(w)''', setup='''
from algs14_hashtable.hashtable_linked import LinkedHashtable
from resources.english import english_words
words = english_words()
ht = LinkedHashtable({})
for w in words[:160564]:
ht.put(w,w)'''.format(size), repeat=7, number=5))/5
search_sorted = min(timeit.repeat(stmt='''
for w in words[160564:]:
ht.get(w)''', setup='''
from algs14_hashtable.hashtable_linked_sorted import SortedLinkedListsHashtable
from resources.english import english_words
words = english_words()
ht = SortedLinkedListsHashtable({})
for w in words[:160564]:
ht.put(w,w)'''.format(size), repeat=7, number=5))/5
tbl.row([size, search_oa, search_sc, search_sorted])
| 34.405983 | 92 | 0.620792 | 1,045 | 8,051 | 4.694737 | 0.14067 | 0.058704 | 0.022014 | 0.033632 | 0.771912 | 0.737261 | 0.727477 | 0.727477 | 0.727477 | 0.727477 | 0 | 0.046442 | 0.251149 | 8,051 | 233 | 93 | 34.553648 | 0.767291 | 0.079245 | 0 | 0.811111 | 0 | 0 | 0.412443 | 0.098201 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038889 | false | 0 | 0.15 | 0.005556 | 0.244444 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
2ea7bdc15084fd23b19cd3a6bcb6311df50cfdd9 | 35 | py | Python | PyBambooHR/__init__.py | zoni/PyBambooHR | a6536501c6dacb3a6b2bc48297925ce0dd499bee | [
"MIT"
] | null | null | null | PyBambooHR/__init__.py | zoni/PyBambooHR | a6536501c6dacb3a6b2bc48297925ce0dd499bee | [
"MIT"
] | null | null | null | PyBambooHR/__init__.py | zoni/PyBambooHR | a6536501c6dacb3a6b2bc48297925ce0dd499bee | [
"MIT"
] | null | null | null | from .PyBambooHR import PyBambooHR
| 17.5 | 34 | 0.857143 | 4 | 35 | 7.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114286 | 35 | 1 | 35 | 35 | 0.967742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2ec7bd616998921f665afcca6b9687c543f49ad7 | 104 | py | Python | tests/emulated_modules/sample_2.py | alisaifee/hiro | e93551b575c10729766b077bb1a79b1f87436a4e | [
"MIT"
] | 5 | 2017-03-16T06:55:38.000Z | 2021-04-07T15:42:23.000Z | tests/emulated_modules/sample_2.py | alisaifee/hiro | e93551b575c10729766b077bb1a79b1f87436a4e | [
"MIT"
] | 8 | 2017-01-12T12:26:58.000Z | 2020-05-26T02:20:57.000Z | tests/emulated_modules/sample_2.py | alisaifee/hiro | e93551b575c10729766b077bb1a79b1f87436a4e | [
"MIT"
] | 4 | 2016-06-20T11:32:14.000Z | 2019-06-27T07:14:44.000Z | import datetime
import time
from . import sub_module_2
__all__ = ["datetime", "time", "sub_module_2"]
| 14.857143 | 46 | 0.740385 | 15 | 104 | 4.6 | 0.533333 | 0.26087 | 0.289855 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022472 | 0.144231 | 104 | 6 | 47 | 17.333333 | 0.752809 | 0 | 0 | 0 | 0 | 0 | 0.230769 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.75 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2ee2644e73489fdecbc5d67baf80a77ce15a3d3c | 108 | py | Python | app/database/models/__init__.py | statar/chat_app_backend | f964c77395d400df47af3dbb663951e0c718636c | [
"MIT"
] | null | null | null | app/database/models/__init__.py | statar/chat_app_backend | f964c77395d400df47af3dbb663951e0c718636c | [
"MIT"
] | null | null | null | app/database/models/__init__.py | statar/chat_app_backend | f964c77395d400df47af3dbb663951e0c718636c | [
"MIT"
] | null | null | null | # src/database/models/__init__.py
from .user import *
from .user_actions import * # to do remove class | 21.6 | 48 | 0.722222 | 16 | 108 | 4.5625 | 0.8125 | 0.219178 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.185185 | 108 | 5 | 48 | 21.6 | 0.829545 | 0.462963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2c0cf66e015204e34e79f95b9b8faa61d5ed2422 | 74 | py | Python | test/__init__.py | toogy/pendigits-hmm | 03382e1457941714439d40b67e53eaf117fe4d08 | [
"MIT"
] | null | null | null | test/__init__.py | toogy/pendigits-hmm | 03382e1457941714439d40b67e53eaf117fe4d08 | [
"MIT"
] | null | null | null | test/__init__.py | toogy/pendigits-hmm | 03382e1457941714439d40b67e53eaf117fe4d08 | [
"MIT"
] | null | null | null | import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
| 14.8 | 51 | 0.662162 | 14 | 74 | 3.5 | 0.571429 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030303 | 0.108108 | 74 | 4 | 52 | 18.5 | 0.712121 | 0 | 0 | 0 | 0 | 0 | 0.027027 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
25ace007eb41f7540d88eb4d9d9c2f7774d2ccda | 41 | py | Python | deepracing_py/deepracing/exceptions/__init__.py | linklab-uva/deepracing | fc25c47658277df029e7399d295d97a75fe85216 | [
"Apache-2.0"
] | 11 | 2020-06-29T15:21:37.000Z | 2021-04-12T00:42:26.000Z | deepracing_py/deepracing/exceptions/__init__.py | linklab-uva/deepracing | fc25c47658277df029e7399d295d97a75fe85216 | [
"Apache-2.0"
] | null | null | null | deepracing_py/deepracing/exceptions/__init__.py | linklab-uva/deepracing | fc25c47658277df029e7399d295d97a75fe85216 | [
"Apache-2.0"
] | 4 | 2019-01-23T23:36:57.000Z | 2021-07-02T00:18:37.000Z | class DeepRacingException(Exception): ... | 41 | 41 | 0.804878 | 3 | 41 | 11 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04878 | 41 | 1 | 41 | 41 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
25bcdb887b2b6508495bb4a5590b435282354564 | 44 | py | Python | Mundo 3/ex111/utilidadescev/__init__.py | RafaelSdm/Curso-de-Python | ae933ba80ee00ad5160bd5d05cf4b21007943fd4 | [
"MIT"
] | 1 | 2021-03-10T21:53:38.000Z | 2021-03-10T21:53:38.000Z | Mundo 3/ex112/utilidadescev/__init__.py | RafaelSdm/Curso-de-Python | ae933ba80ee00ad5160bd5d05cf4b21007943fd4 | [
"MIT"
] | null | null | null | Mundo 3/ex112/utilidadescev/__init__.py | RafaelSdm/Curso-de-Python | ae933ba80ee00ad5160bd5d05cf4b21007943fd4 | [
"MIT"
] | null | null | null | from ex111.utilidadescev import moeda, dado | 44 | 44 | 0.840909 | 6 | 44 | 6.166667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076923 | 0.113636 | 44 | 1 | 44 | 44 | 0.871795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
25d6c3e6fa7a63921e9c6c46cac0f6c0f4a47fce | 777 | py | Python | app/urls.py | thexdesk/ESPN-API | debaf328d385c688f90dbb96703244f87da3c100 | [
"MIT"
] | null | null | null | app/urls.py | thexdesk/ESPN-API | debaf328d385c688f90dbb96703244f87da3c100 | [
"MIT"
] | 3 | 2020-06-05T17:12:59.000Z | 2021-06-10T18:09:18.000Z | app/urls.py | thexdesk/ESPN-API | debaf328d385c688f90dbb96703244f87da3c100 | [
"MIT"
] | 1 | 2020-02-09T08:17:18.000Z | 2020-02-09T08:17:18.000Z | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^(?P<leagueId>[0-9]+)/(?P<year>[0-9]+)/teams/$', views.getTeams),
url(r'^(?P<leagueId>[0-9]+)/(?P<year>[0-9]+)/teams/(?P<teamId>[0-9]+)/$', views.getTeam),
url(r'^(?P<leagueId>[0-9]+)/(?P<year>[0-9]+)/power-rankings/$', views.getPowerRankings),
url(r'^(?P<leagueId>[0-9]+)/(?P<year>[0-9]+)/scoreboard/$', views.getScoreboard),
url(r'^(?P<leagueId>[0-9]+)/teams/$', views.getTeams),
url(r'^(?P<leagueId>[0-9]+)/teams/(?P<teamId>[0-9]+)/$', views.getTeam),
url(r'^(?P<leagueId>[0-9]+)/teams/(?P<teamId>[0-9]+)/history/$', views.getTeamHistory),
url(r'^(?P<leagueId>[0-9]+)/power-rankings/$', views.getPowerRankings),
url(r'^(?P<leagueId>[0-9]+)/scoreboard/$', views.getScoreboard),
] | 48.5625 | 91 | 0.602317 | 122 | 777 | 3.836066 | 0.204918 | 0.068376 | 0.096154 | 0.25 | 0.831197 | 0.711538 | 0.683761 | 0.655983 | 0.655983 | 0.655983 | 0 | 0.044017 | 0.06435 | 777 | 16 | 92 | 48.5625 | 0.599725 | 0 | 0 | 0 | 0 | 0.230769 | 0.542416 | 0.542416 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
d349b3c3a757a1de1806a79e04ae9cb4d1476671 | 35 | py | Python | opensenate/parliamentarians/__init__.py | g0ulartleo/opendata-senado | 091d060d55d49f844d192baa1c0aef1aa039f1c0 | [
"MIT"
] | null | null | null | opensenate/parliamentarians/__init__.py | g0ulartleo/opendata-senado | 091d060d55d49f844d192baa1c0aef1aa039f1c0 | [
"MIT"
] | null | null | null | opensenate/parliamentarians/__init__.py | g0ulartleo/opendata-senado | 091d060d55d49f844d192baa1c0aef1aa039f1c0 | [
"MIT"
] | null | null | null | from .senator import SenatorClient
| 17.5 | 34 | 0.857143 | 4 | 35 | 7.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114286 | 35 | 1 | 35 | 35 | 0.967742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d36c69306b1f6cccfb85b59f8fe15a647e6867a7 | 214 | py | Python | defining_classes/demo.py | Minkov/python-oop-2020-02 | d2acb1504c1a135cded2ae6ff42acccb303d9ab1 | [
"MIT"
] | 2 | 2020-02-27T18:34:45.000Z | 2020-10-25T17:34:15.000Z | defining_classes/demo.py | Minkov/python-oop-2020-02 | d2acb1504c1a135cded2ae6ff42acccb303d9ab1 | [
"MIT"
] | null | null | null | defining_classes/demo.py | Minkov/python-oop-2020-02 | d2acb1504c1a135cded2ae6ff42acccb303d9ab1 | [
"MIT"
] | null | null | null | from math import pi
class Circle:
def __init__(self, radius):
self.radius = radius
def area(self):
return self.radius * self.radius * pi
c = Circle(5)
print(c.__dict__)
print(c.area())
| 14.266667 | 45 | 0.630841 | 31 | 214 | 4.096774 | 0.516129 | 0.314961 | 0.220472 | 0.314961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00625 | 0.252336 | 214 | 14 | 46 | 15.285714 | 0.7875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0 | 0.111111 | 0.111111 | 0.555556 | 0.222222 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
d38a4bf8ce92fcdac6b2fdce750c0f27939d21eb | 6,804 | py | Python | tests/initsync/data_report_initsync_summary.py | iagcl/data_pipeline | b9b965d43a4261357e417f4eeee5d8b7d2dfd858 | [
"Apache-2.0"
] | 16 | 2017-10-31T21:43:26.000Z | 2019-08-11T08:49:06.000Z | tests/initsync/data_report_initsync_summary.py | iagcl/data_pipeline | b9b965d43a4261357e417f4eeee5d8b7d2dfd858 | [
"Apache-2.0"
] | 1 | 2017-11-01T06:25:56.000Z | 2017-11-01T06:25:56.000Z | tests/initsync/data_report_initsync_summary.py | iagcl/data_pipeline | b9b965d43a4261357e417f4eeee5d8b7d2dfd858 | [
"Apache-2.0"
] | 9 | 2017-10-30T05:23:15.000Z | 2022-02-17T03:53:09.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import collections
import data_pipeline.constants.const as const
TestCase = collections.namedtuple('TestCase', "description input_all_table_results expected_subject expected_total_count expected_status expected_min_lsn expected_max_lsn expected_run_id expected_mailing_list")
tests=[
TestCase(
description="Single table success",
input_all_table_results={
'tableA': (123, const.SUCCESS, const.INITSYNCEXTRACT, "foo")
},
expected_total_count=1,
expected_status=const.SUCCESS,
expected_min_lsn=123,
expected_max_lsn=123,
expected_run_id=1,
expected_subject='myprofile InitSync SUCCESS',
expected_mailing_list=set(['someone@gmail.com']),
),
TestCase(
description="Single table success, extractlsn disabled",
input_all_table_results={
'tableA': (None, const.SUCCESS, const.INITSYNCEXTRACT, "foo")
},
expected_total_count=1,
expected_status=const.SUCCESS,
expected_min_lsn=None,
expected_max_lsn=None,
expected_run_id=1,
expected_subject='myprofile InitSync SUCCESS',
expected_mailing_list=set(['someone@gmail.com']),
),
TestCase(
description="Single table error",
input_all_table_results={
'tableA': (123, const.ERROR, const.INITSYNCEXTRACT, "foo")
},
expected_total_count=1,
expected_status=const.ERROR,
expected_min_lsn=123,
expected_max_lsn=123,
expected_run_id=1,
expected_subject='myprofile InitSync ERROR',
expected_mailing_list=set(['someone@gmail.com', 'someone@error.com']),
),
TestCase(
description="Three table success",
input_all_table_results={
'tableA': (123, const.SUCCESS, const.INITSYNCEXTRACT, "foo"),
'tableB': (123, const.SUCCESS, const.INITSYNCEXTRACT, "foo"),
'tableC': (123, const.SUCCESS, const.INITSYNCEXTRACT, "foo"),
},
expected_total_count=3,
expected_status=const.SUCCESS,
expected_min_lsn=123,
expected_max_lsn=123,
expected_run_id=1,
expected_subject='myprofile InitSync SUCCESS',
expected_mailing_list=set(['someone@gmail.com']),
),
TestCase(
description="Three table error",
input_all_table_results={
'tableA': (123, const.ERROR, const.INITSYNCEXTRACT, "foo"),
'tableB': (123, const.ERROR, const.INITSYNCEXTRACT, "foo"),
'tableC': (123, const.ERROR, const.INITSYNCEXTRACT, "foo"),
},
expected_total_count=3,
expected_status=const.ERROR,
expected_min_lsn=123,
expected_max_lsn=123,
expected_run_id=1,
expected_subject='myprofile InitSync ERROR',
expected_mailing_list=set(['someone@gmail.com', 'someone@error.com']),
),
TestCase(
description="Error on last table resulting in warning",
input_all_table_results={
'tableA': (123, const.SUCCESS, const.INITSYNC, "foo"),
'tableB': (123, const.SUCCESS, const.INITSYNC, "foo"),
'tableC': (123, const.SUCCESS, const.INITSYNC, "foo"),
'tableD': (123, const.SUCCESS, const.INITSYNC, "foo"),
'tableE': (123, const.ERROR, const.INITSYNCAPPLY, "foo"),
},
expected_total_count=5,
expected_status=const.WARNING,
expected_min_lsn=123,
expected_max_lsn=123,
expected_run_id=1,
expected_subject='myprofile InitSync WARNING (4 out of 5)',
expected_mailing_list=set(['someone@gmail.com', 'someone@error.com']),
),
TestCase(
description="Error on second last table resulting in warning",
input_all_table_results={
'tableA': (123, const.SUCCESS, const.INITSYNC, "foo"),
'tableB': (123, const.SUCCESS, const.INITSYNC, "foo"),
'tableC': (123, const.SUCCESS, const.INITSYNC, "foo"),
'tableD': (123, const.ERROR, const.INITSYNCAPPLY, "foo"),
'tableE': (123, const.SUCCESS, const.INITSYNC, "foo"),
},
expected_total_count=5,
expected_status=const.WARNING,
expected_min_lsn=123,
expected_max_lsn=123,
expected_run_id=1,
expected_subject='myprofile InitSync WARNING (4 out of 5)',
expected_mailing_list=set(['someone@gmail.com', 'someone@error.com']),
),
TestCase(
description="Error on first table resulting in warning",
input_all_table_results={
'tableA': (123, const.ERROR, const.INITSYNCAPPLY, "foo"),
'tableB': (123, const.SUCCESS, const.INITSYNC, "foo"),
'tableC': (123, const.SUCCESS, const.INITSYNC, "foo"),
'tableD': (123, const.SUCCESS, const.INITSYNC, "foo"),
'tableE': (123, const.SUCCESS, const.INITSYNC, "foo"),
},
expected_total_count=5,
expected_status=const.WARNING,
expected_min_lsn=123,
expected_max_lsn=123,
expected_run_id=1,
expected_subject='myprofile InitSync WARNING (4 out of 5)',
expected_mailing_list=set(['someone@gmail.com', 'someone@error.com']),
),
TestCase(
description="Error on middle table resulting in warning",
input_all_table_results={
'tableA': (123, const.SUCCESS, const.INITSYNC, "foo"),
'tableB': (123, const.SUCCESS, const.INITSYNC, "foo"),
'tableC': (123, const.ERROR, const.INITSYNCAPPLY, "foo"),
'tableD': (123, const.SUCCESS, const.INITSYNC, "foo"),
'tableE': (123, const.SUCCESS, const.INITSYNC, "foo"),
},
expected_total_count=5,
expected_status=const.WARNING,
expected_min_lsn=123,
expected_max_lsn=123,
expected_run_id=1,
expected_subject='myprofile InitSync WARNING (4 out of 5)',
expected_mailing_list=set(['someone@gmail.com', 'someone@error.com']),
),
]
| 40.023529 | 210 | 0.641681 | 783 | 6,804 | 5.383142 | 0.167305 | 0.053144 | 0.084698 | 0.094899 | 0.785528 | 0.769395 | 0.72669 | 0.72669 | 0.72669 | 0.717675 | 0 | 0.031383 | 0.241329 | 6,804 | 169 | 211 | 40.260355 | 0.785161 | 0.110817 | 0 | 0.732394 | 0 | 0 | 0.207594 | 0.007296 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.014085 | 0 | 0.014085 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
d3997bdf79040050d8c8df1e6fd35c04b1a7fa9d | 146 | py | Python | mysite/polls/views.py | cs-fullstack-fall-2018/django-intro1-RoyzellW | 1e011df68e9d2533a55be83b3b0c3a82ee854a8e | [
"Apache-2.0"
] | null | null | null | mysite/polls/views.py | cs-fullstack-fall-2018/django-intro1-RoyzellW | 1e011df68e9d2533a55be83b3b0c3a82ee854a8e | [
"Apache-2.0"
] | null | null | null | mysite/polls/views.py | cs-fullstack-fall-2018/django-intro1-RoyzellW | 1e011df68e9d2533a55be83b3b0c3a82ee854a8e | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
# Create your views here.
def index(request):
return HttpResponse("This is broken, try something else.")
| 24.333333 | 62 | 0.760274 | 20 | 146 | 5.55 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.157534 | 146 | 5 | 63 | 29.2 | 0.902439 | 0.157534 | 0 | 0 | 0 | 0 | 0.289256 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
4cb6c57c0f7a17520a9ed625c51ba885556b15bd | 7,236 | py | Python | tests/callbacks/test_torch_scheduler.py | Ddaniela13/torchbearer | 89c2724b76f3a85065ea79598aece4b2c3c5f7fb | [
"MIT"
] | null | null | null | tests/callbacks/test_torch_scheduler.py | Ddaniela13/torchbearer | 89c2724b76f3a85065ea79598aece4b2c3c5f7fb | [
"MIT"
] | null | null | null | tests/callbacks/test_torch_scheduler.py | Ddaniela13/torchbearer | 89c2724b76f3a85065ea79598aece4b2c3c5f7fb | [
"MIT"
] | null | null | null | from unittest import TestCase
from mock import patch, Mock
import torchbearer
from torchbearer.callbacks import TorchScheduler, LambdaLR, StepLR, MultiStepLR, ExponentialLR, CosineAnnealingLR,\
ReduceLROnPlateau
class TestTorchScheduler(TestCase):
def test_torch_scheduler_on_batch_with_monitor(self):
state = {torchbearer.EPOCH: 1, torchbearer.METRICS: {'test': 101}, torchbearer.OPTIMIZER: 'optimizer'}
mock_scheduler = Mock()
mock_scheduler.return_value = mock_scheduler
torch_scheduler = TorchScheduler(lambda opt: mock_scheduler(opt), monitor='test', step_on_batch=True)
torch_scheduler.on_start(state)
mock_scheduler.assert_called_once_with('optimizer')
mock_scheduler.reset_mock()
torch_scheduler.on_start_training(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_sample(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_step_training(state)
mock_scheduler.step.assert_called_once_with(101)
mock_scheduler.reset_mock()
torch_scheduler.on_end_epoch(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
def test_torch_scheduler_on_epoch_with_monitor(self):
state = {torchbearer.EPOCH: 1, torchbearer.METRICS: {'test': 101}, torchbearer.OPTIMIZER: 'optimizer'}
mock_scheduler = Mock()
mock_scheduler.return_value = mock_scheduler
torch_scheduler = TorchScheduler(lambda opt: mock_scheduler(opt), monitor='test', step_on_batch=False)
torch_scheduler.on_start(state)
mock_scheduler.assert_called_once_with('optimizer')
mock_scheduler.reset_mock()
torch_scheduler.on_start_training(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_sample(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_step_training(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_end_epoch(state)
mock_scheduler.step.assert_called_once_with(101, epoch=1)
mock_scheduler.reset_mock()
def test_torch_scheduler_on_batch_no_monitor(self):
state = {torchbearer.EPOCH: 1, torchbearer.OPTIMIZER: 'optimizer'}
mock_scheduler = Mock()
mock_scheduler.return_value = mock_scheduler
torch_scheduler = TorchScheduler(lambda opt: mock_scheduler(opt), monitor=None, step_on_batch=True)
torch_scheduler.on_start(state)
mock_scheduler.assert_called_once_with('optimizer')
mock_scheduler.reset_mock()
torch_scheduler.on_start_training(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_sample(state)
mock_scheduler.step.assert_called_once_with()
mock_scheduler.reset_mock()
torch_scheduler.on_step_training(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_end_epoch(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
def test_torch_scheduler_on_epoch_no_monitor(self):
state = {torchbearer.EPOCH: 1, torchbearer.OPTIMIZER: 'optimizer'}
mock_scheduler = Mock()
mock_scheduler.return_value = mock_scheduler
torch_scheduler = TorchScheduler(lambda opt: mock_scheduler(opt), monitor=None, step_on_batch=False)
torch_scheduler.on_start(state)
mock_scheduler.assert_called_once_with('optimizer')
mock_scheduler.reset_mock()
torch_scheduler.on_start_training(state)
mock_scheduler.step.assert_called_once_with(epoch=1)
mock_scheduler.reset_mock()
torch_scheduler.on_sample(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_step_training(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_end_epoch(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
class TestLambdaLR(TestCase):
@patch('torch.optim.lr_scheduler.LambdaLR')
def test_lambda_lr(self, lr_mock):
state = {torchbearer.OPTIMIZER: 'optimizer'}
scheduler = LambdaLR(0.1, last_epoch=-4, step_on_batch='batch')
scheduler.on_start(state)
lr_mock.assert_called_once_with('optimizer', 0.1, last_epoch=-4)
self.assertTrue(scheduler._step_on_batch == 'batch')
class TestStepLR(TestCase):
@patch('torch.optim.lr_scheduler.StepLR')
def test_lambda_lr(self, lr_mock):
state = {torchbearer.OPTIMIZER: 'optimizer'}
scheduler = StepLR(10, gamma=0.4, last_epoch=-4, step_on_batch='batch')
scheduler.on_start(state)
lr_mock.assert_called_once_with('optimizer', 10, gamma=0.4, last_epoch=-4)
self.assertTrue(scheduler._step_on_batch == 'batch')
class TestMultiStepLR(TestCase):
@patch('torch.optim.lr_scheduler.MultiStepLR')
def test_lambda_lr(self, lr_mock):
state = {torchbearer.OPTIMIZER: 'optimizer'}
scheduler = MultiStepLR(10, gamma=0.4, last_epoch=-4, step_on_batch='batch')
scheduler.on_start(state)
lr_mock.assert_called_once_with('optimizer', 10, gamma=0.4, last_epoch=-4)
self.assertTrue(scheduler._step_on_batch == 'batch')
class TestExponentialLR(TestCase):
@patch('torch.optim.lr_scheduler.ExponentialLR')
def test_lambda_lr(self, lr_mock):
state = {torchbearer.OPTIMIZER: 'optimizer'}
scheduler = ExponentialLR(0.4, last_epoch=-4, step_on_batch='batch')
scheduler.on_start(state)
lr_mock.assert_called_once_with('optimizer', 0.4, last_epoch=-4)
self.assertTrue(scheduler._step_on_batch == 'batch')
class TestCosineAnnealingLR(TestCase):
@patch('torch.optim.lr_scheduler.CosineAnnealingLR')
def test_lambda_lr(self, lr_mock):
state = {torchbearer.OPTIMIZER: 'optimizer'}
scheduler = CosineAnnealingLR(4, eta_min=10, last_epoch=-4, step_on_batch='batch')
scheduler.on_start(state)
lr_mock.assert_called_once_with('optimizer', 4, eta_min=10, last_epoch=-4)
self.assertTrue(scheduler._step_on_batch == 'batch')
class TestReduceLROnPlateau(TestCase):
@patch('torch.optim.lr_scheduler.ReduceLROnPlateau')
def test_lambda_lr(self, lr_mock):
state = {torchbearer.OPTIMIZER: 'optimizer'}
scheduler = ReduceLROnPlateau(monitor='test', mode='max', factor=0.2, patience=100, verbose=True, threshold=10,
threshold_mode='thresh', cooldown=5, min_lr=0.1, eps=1e-4, step_on_batch='batch')
scheduler.on_start(state)
lr_mock.assert_called_once_with('optimizer', mode='max', factor=0.2, patience=100, verbose=True, threshold=10,
threshold_mode='thresh', cooldown=5, min_lr=0.1, eps=1e-4)
self.assertTrue(scheduler._step_on_batch == 'batch')
self.assertTrue(scheduler._monitor == 'test')
| 37.6875 | 119 | 0.708264 | 882 | 7,236 | 5.451247 | 0.089569 | 0.151414 | 0.079867 | 0.091514 | 0.895383 | 0.894759 | 0.84297 | 0.839226 | 0.826123 | 0.800541 | 0 | 0.014017 | 0.191542 | 7,236 | 191 | 120 | 37.884817 | 0.807863 | 0 | 0 | 0.686131 | 0 | 0 | 0.069652 | 0.03068 | 0 | 0 | 0 | 0 | 0.240876 | 1 | 0.072993 | false | 0 | 0.029197 | 0 | 0.153285 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4ce147c505ab00aaa9c2199911991ce54819aa27 | 3,394 | py | Python | tests/unit_tests/test_michelson/test_repl/test_chest.py | m-kus/pytezos | dfb7e34a4ca24b5cf40541900c5f761c61571996 | [
"MIT"
] | null | null | null | tests/unit_tests/test_michelson/test_repl/test_chest.py | m-kus/pytezos | dfb7e34a4ca24b5cf40541900c5f761c61571996 | [
"MIT"
] | null | null | null | tests/unit_tests/test_michelson/test_repl/test_chest.py | m-kus/pytezos | dfb7e34a4ca24b5cf40541900c5f761c61571996 | [
"MIT"
] | null | null | null | from unittest import TestCase, skip
from pytezos.contract.interface import ContractInterface
source = """
storage (bytes);
parameter (pair (chest_key) (chest));
code {
UNPAIR;
DIP {DROP};
UNPAIR;
DIIP {PUSH nat 1000};
OPEN_CHEST;
IF_LEFT
{ # successful case
NIL operation;
PAIR ;
}
{
IF
{ # first type of failure
PUSH bytes 0x01;
NIL operation;
PAIR;
}
{ # second type of failure
PUSH bytes 0x00;
NIL operation;
PAIR;
}
}
}
"""
chest_key = bytes.fromhex(
'ac91a7a0efcd9e97bdc29f8c8184e3c5d8dbea9eb284e1a3fdd9bafa9c8380d5f793cbb4ac869cfbafac82c7f896a991a48ef48bb79189a2a997a3bee183b4f8dce0d4e781869ac0d5ab8ad3a48894bfb690b4d6b3b1a7fdcbf6f39a87bac7b59cf9a3b9d1d2d09ea1ca8af9fee7acac82e89aeea09ee7a1acf38dddc2d8bdb9e6ffced5da9cb4d284d9f692d29bc28cadc6ead09bd9b2ffe8ccb392ef8c96e9b7a3d3c0b9caceb3dee6b9dcefb5ff98a9e98186edb69bbbdec8f48490c897ecd5d0dbc587dedac3fba6e2beb4d6f5e2e3c9e4dfdbfcc0b182dff283efbfdbdba997c0c6bdcedc85d5e19a89bf9c8484dfe3c0d1deb08ceffb96bad9edffbfe4f6dfb882b1eddcbdbef39297b0b4d19bfb988185859ba795f9dfffc6b5f4b6e8d6fbe28303c1fadc829d9bf1a992cae0c3c2acd7a2bb8bbe93cb87b7e1aec883aada82c7fac889f1dfe6e2bab8f18f9087aebffef0b3ab8bfed9a5be84a9a7bcabf2dfb0c4d399b7b3dda9deadecf0c1e491cac4a49e8ca5aadebef39dc4dae8b1d3bb93c8ebb0f6c586c0d7cba1cbfccaabf0cbdd95969fa4b3f59af3fa8ca0f1a0f6d09b958eaad8fcd595d1defee1c989b5cadcb9f8a7bfaea1e2969899ee83ffd8e683f9dec4decfe0aff1d5c2dcd3abb6c8c6bce4c8e2f79eb6fde3b8a3a3add5fad4e5ebc8de93ae909ffb8cccc3e6a9ed8ed6b8cd88cab29086d4e8f99be4bebf84f494b0f5fce4e0c4d092e4ce80cca0a7c69fb2bef380ece0e189d9ede0eac9f5a4a5d2f79a9094dbdf91add38b92e5cbeea680effcea83eee7b697c593ab97adb1eff48402')
chest = bytes.fromhex(
'bea8a8b993f2fdb2c3d7f8a9b4e2bba3def2edd5928a82878a81ace6b8e2c0efc5c7dbe9e6b88bca86b0df94f6b5c4d2d4f7f6e9a183fde1edd2b3fc9d9f9c8de6b7c1e580bdb284d7eca9e485cb84b8e386bfa09fe297c5df94eacdd9c090e6eab39aa6ffa69df9fabec2d6b5ba94a6bec4c387dae38ec6b7bdf793e1edb8d2c5e49bd7c1ba84b69afe89d3bd9799bfadfec2e2ddcc88b8d2e0a99f9cc9b0deb682b1c6c8d1bea2e695b2ebd1a6d8eebeddeea3a4b2d983d6cc9cd1a8d0e0c4f4cb8fffb9ddd1f9abb4dbc3ee808cf1cbbd91c7e4859eecfad5b2add3d4b8dae7e0fdabc9f0b29ac78784b7bd8bcaed91ca93cb95ccd79ac8d8b184d1f4f8b0fed1d5d3f3b1ed9dfcd5f483b5a581d79ef5cbbe98889b80bd80f0f9fdd5f3bed5f38653a7f490dddec8d782d2b2b8c1bc9999859fbbc2dd97ed9df4b5b9879588c8ea93c4bfbcaed1efeac4e8bdcab1c3818fa8e8e8b3c6978cabf08c8daddaa2fbbf81d88fda95cecb8591fd90d98ad3b29698c5a4e3ac8e95f7dba0ff91a6ff97d1e1f8c9fb9ef6afae95ac908bb4b9b3b8f8ed8780bfbac6f39cf1f7cab980abcacedeac90afe5bfcda8dab990ffb3a2ad9b889e94e8b6d1f099f5cef7dbacd799e0f2ccf9e7b7c6e591bddeee8895cc89f2d9839ef0afe08ed783c7869685f5fca5cdebf9889ef2839a8ebd88eeb8ebfbd5dab8a4ec86a6a488b1b6f8fe828b8fefaaf9dbd6ddddaaeea4d8e6d5fca2dfdd9af1bdeca1bcf09ab898a49dcbc9f3f99f83fdb690c7cbb7cff5cbca88eafe8ff5eec980aadbe4c2be87b7b098adc3bfd6b1b3a106e1cae5665a7f70a26b8e06979288e26222009d7b6e40acb900000021a1fb7e9f43f45b19d4a5ed10cf729c233612d82ea642e09efd90873e66952e97bb')
@skip
class OpenChestTestCase(TestCase):
def test_open_chest(self):
ci = ContractInterface.from_michelson(source)
ci.call(chest_key, chest).interpret()
| 72.212766 | 1,299 | 0.864467 | 87 | 3,394 | 33.632184 | 0.574713 | 0.008202 | 0.016405 | 0.01162 | 0.015038 | 0 | 0 | 0 | 0 | 0 | 0 | 0.372926 | 0.111962 | 3,394 | 46 | 1,300 | 73.782609 | 0.597877 | 0 | 0 | 0.2 | 0 | 0 | 0.895993 | 0.725987 | 0 | 1 | 0.002357 | 0 | 0 | 1 | 0.025 | false | 0 | 0.05 | 0 | 0.1 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4ce720597fdb62dfff722e3c7d80c43be3dfe355 | 2,164 | py | Python | blender/2.79/scripts/addons/add_curve_sapling/presets/japanese_maple.py | uzairakbar/bpy2.79 | 3a3e0004ac6783c4e4b89d939e4432de99026a85 | [
"MIT"
] | 2 | 2019-11-27T09:05:42.000Z | 2020-02-20T01:25:23.000Z | add_curve_sapling/presets/japanese_maple.py | 1-MillionParanoidTterabytes/blender-addons-master | acc8fc23a38e6e89099c3e5079bea31ce85da06a | [
"Unlicense"
] | null | null | null | add_curve_sapling/presets/japanese_maple.py | 1-MillionParanoidTterabytes/blender-addons-master | acc8fc23a38e6e89099c3e5079bea31ce85da06a | [
"Unlicense"
] | 4 | 2020-02-19T20:02:26.000Z | 2022-02-11T18:47:56.000Z | {'leafScaleT': -0.5, 'shapeS': '10', 'scaleV': 2.0, 'resU': 4, 'boneStep': (1, 1, 1, 1), 'af3': 4.0, 'baseSize': 0.4000000059604645, 'prunePowerLow': 0.0010000000474974513, 'leafRotateV': 0.0, 'rootFlare': 1.0, 'customShape': (0.699999988079071, 1.0, 0.20000000298023224, 0.800000011920929), 'attractOut': (0.0, 0.75, 0.25, 0.0), 'useArm': False, 'branches': (0, 50, 10, 16), 'leafDownAngle': 45.0, 'length': (1.0, 0.30000001192092896, 0.5, 0.20000000298023224), 'segSplits': (0.25, 0.4000000059604645, 0.5, 0.0), 'makeMesh': False, 'curveV': (400.0, 150.0, 100.0, 0.0), 'curveBack': (0.0, 0.0, 0.0, 0.0), 'af1': 1.0, 'closeTip': False, 'frameRate': 1.0, 'leafangle': -10.0, 'af2': 1.0, 'rMode': 'rotate', 'leafScaleV': 0.0, 'rotateV': (15.0, 0.0, 0.0, 0.0), 'useParentAngle': False, 'taperCrown': 0.0, 'minRadius': 0.001500000013038516, 'splitAngleV': (5.0, 5.0, 0.0, 0.0), 'scaleV0': 0.10000000149011612, 'bevel': True, 'leafDownAngleV': 10.0, 'previewArm': False, 'showLeaves': True, 'ratioPower': 1.25, 'handleType': '0', 'branchDist': 1.0, 'leafScaleX': 0.20000000298023224, 'prune': False, 'splitHeight': 0.30000001192092896, 'baseSplits': 2, 'baseSize_s': 0.25, 'downAngle': (90.0, 90.0, 30.0, 30.0), 'bevelRes': 1, 'leafAnim': False, 'loopFrames': 0, 'lengthV': (0.0, 0.0, 0.0, 0.0), 'gust': 1.0, 'downAngleV': (0.0, 90.0, 15.0, 10.0), 'leafRotate': 137.5, 'wind': 1.0, 'leaves': -5, 'curve': (0.0, -20.0, -20.0, 0.0), 'radiusTweak': (1.0, 1.0, 1.0, 1.0), 'pruneRatio': 1.0, 'pruneBase': 0.30000001192092896, 'armAnim': False, 'splitBias': 0.0, 'rotate': (99.5, 137.5, 137.5, 137.5), 'armLevels': 2, 'scale': 6.0, 'prunePowerHigh': 0.5, 'nrings': 0, 'splitByLen': True, 'leafShape': 'hex', 'splitAngle': (15.0, 20.0, 25.0, 0.0), 'ratio': 0.019999999552965164, 'scale0': 1.0, 'autoTaper': True, 'pruneWidth': 0.4000000059604645, 'leafScale': 0.17000000178813934, 'seed': 0, 'curveRes': (16, 5, 3, 1), 'horzLeaves': True, 
'useOldDownAngle': False, 'levels': 4, 'pruneWidthPeak': 0.6000000238418579, 'attractUp': (0.0, -0.3499999940395355, -0.20000000298023224, 0.0), 'taper': (1.0, 1.0, 1.0, 1.0), 'leafDist': '6', 'gustF': 0.07500000298023224, 'shape': '8'} | 2,164 | 2,164 | 0.633549 | 335 | 2,164 | 4.089552 | 0.370149 | 0.065693 | 0.054745 | 0.046715 | 0.041606 | 0.028467 | 0.028467 | 0.023358 | 0 | 0 | 0 | 0.306977 | 0.105823 | 2,164 | 1 | 2,164 | 2,164 | 0.401034 | 0 | 0 | 0 | 0 | 0 | 0.333487 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e2635abc229517d1d5fec156db782537421fce5a | 12,331 | py | Python | test/orm/test_session_state_change.py | petit87/sqlalchemy | 67d674bd63ca36ac32b23f96e2b19e9dac6b0863 | [
"MIT"
] | null | null | null | test/orm/test_session_state_change.py | petit87/sqlalchemy | 67d674bd63ca36ac32b23f96e2b19e9dac6b0863 | [
"MIT"
] | null | null | null | test/orm/test_session_state_change.py | petit87/sqlalchemy | 67d674bd63ca36ac32b23f96e2b19e9dac6b0863 | [
"MIT"
] | 1 | 2022-02-28T20:16:29.000Z | 2022-02-28T20:16:29.000Z | from sqlalchemy import exc as sa_exc
from sqlalchemy.orm import state_changes
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
class StateTestChange(state_changes._StateChangeState):
a = 1
b = 2
c = 3
class StateMachineTest(fixtures.TestBase):
def test_single_change(self):
"""test single method that declares and invokes a state change"""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
self._state = StateTestChange.b
m = Machine()
eq_(m._state, _NO_CHANGE)
m.move_to_b()
eq_(m._state, StateTestChange.b)
def test_single_incorrect_change(self):
"""test single method that declares a state change but changes to the
wrong state."""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
self._state = StateTestChange.c
m = Machine()
eq_(m._state, _NO_CHANGE)
with expect_raises_message(
sa_exc.IllegalStateChangeError,
r"Method 'move_to_b\(\)' "
r"caused an unexpected state change to <StateTestChange.c: 3>",
):
m.move_to_b()
def test_single_failed_to_change(self):
"""test single method that declares a state change but didn't do
the change."""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
pass
m = Machine()
eq_(m._state, _NO_CHANGE)
with expect_raises_message(
sa_exc.IllegalStateChangeError,
r"Method 'move_to_b\(\)' failed to change state "
"to <StateTestChange.b: 2> as "
"expected",
):
m.move_to_b()
def test_change_from_sub_method_with_declaration(self):
"""test successful state change by one method calling another that
does the change.
"""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def _inner_move_to_b(self):
self._state = StateTestChange.b
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
with self._expect_state(StateTestChange.b):
self._inner_move_to_b()
m = Machine()
eq_(m._state, _NO_CHANGE)
m.move_to_b()
eq_(m._state, StateTestChange.b)
def test_method_and_sub_method_no_change(self):
"""test methods that declare the state should not change"""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a,), _NO_CHANGE
)
def _inner_do_nothing(self):
pass
@state_changes._StateChange.declare_states(
(StateTestChange.a,), _NO_CHANGE
)
def do_nothing(self):
self._inner_do_nothing()
m = Machine()
eq_(m._state, _NO_CHANGE)
m._state = StateTestChange.a
m.do_nothing()
eq_(m._state, StateTestChange.a)
def test_method_w_no_change_illegal_inner_change(self):
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.c
)
def _inner_move_to_c(self):
self._state = StateTestChange.c
@state_changes._StateChange.declare_states(
(StateTestChange.a,), _NO_CHANGE
)
def do_nothing(self):
self._inner_move_to_c()
m = Machine()
eq_(m._state, _NO_CHANGE)
m._state = StateTestChange.a
with expect_raises_message(
sa_exc.IllegalStateChangeError,
r"Method '_inner_move_to_c\(\)' can't be called here; "
r"method 'do_nothing\(\)' is already in progress and this "
r"would cause an unexpected state change to "
"<StateTestChange.c: 3>",
):
m.do_nothing()
eq_(m._state, StateTestChange.a)
def test_change_from_method_sub_w_no_change(self):
"""test methods that declare the state should not change"""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a,), _NO_CHANGE
)
def _inner_do_nothing(self):
pass
@state_changes._StateChange.declare_states(
(StateTestChange.a,), StateTestChange.b
)
def move_to_b(self):
self._inner_do_nothing()
self._state = StateTestChange.b
m = Machine()
eq_(m._state, _NO_CHANGE)
m._state = StateTestChange.a
m.move_to_b()
eq_(m._state, StateTestChange.b)
def test_invalid_change_from_declared_sub_method_with_declaration(self):
"""A method uses _expect_state() to call a sub-method, which must
declare that state as its destination if no exceptions are raised.
"""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
# method declares StateTestChange.c so can't be called under
# expect_state(StateTestChange.b)
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.c
)
def _inner_move_to_c(self):
self._state = StateTestChange.c
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
with self._expect_state(StateTestChange.b):
self._inner_move_to_c()
m = Machine()
eq_(m._state, _NO_CHANGE)
with expect_raises_message(
sa_exc.IllegalStateChangeError,
r"Cant run operation '_inner_move_to_c\(\)' here; will move "
r"to state <StateTestChange.c: 3> where we are "
"expecting <StateTestChange.b: 2>",
):
m.move_to_b()
def test_invalid_change_from_invalid_sub_method_with_declaration(self):
"""A method uses _expect_state() to call a sub-method, which must
declare that state as its destination if no exceptions are raised.
Test an error is raised if the sub-method doesn't change to the
correct state.
"""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
# method declares StateTestChange.b, but is doing the wrong
# change, so should fail under expect_state(StateTestChange.b)
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def _inner_move_to_c(self):
self._state = StateTestChange.c
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
with self._expect_state(StateTestChange.b):
self._inner_move_to_c()
m = Machine()
eq_(m._state, _NO_CHANGE)
with expect_raises_message(
sa_exc.IllegalStateChangeError,
r"While method 'move_to_b\(\)' was running, method "
r"'_inner_move_to_c\(\)' caused an unexpected state change "
"to <StateTestChange.c: 3>",
):
m.move_to_b()
def test_invalid_prereq_state(self):
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
self._state = StateTestChange.b
@state_changes._StateChange.declare_states(
(StateTestChange.c,), "d"
)
def move_to_d(self):
self._state = "d"
m = Machine()
eq_(m._state, _NO_CHANGE)
m.move_to_b()
eq_(m._state, StateTestChange.b)
with expect_raises_message(
sa_exc.IllegalStateChangeError,
r"Can't run operation 'move_to_d\(\)' when "
"Session is in state <StateTestChange.b: 2>",
):
m.move_to_d()
def test_declare_only(self):
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
state_changes._StateChangeStates.ANY, StateTestChange.b
)
def _inner_move_to_b(self):
self._state = StateTestChange.b
def move_to_b(self):
with self._expect_state(StateTestChange.b):
self._move_to_b()
m = Machine()
eq_(m._state, _NO_CHANGE)
with expect_raises_message(
AssertionError,
"Unexpected call to _expect_state outside of "
"state-changing method",
):
m.move_to_b()
def test_sibling_calls_maintain_correct_state(self):
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
state_changes._StateChangeStates.ANY, StateTestChange.c
)
def move_to_c(self):
self._state = StateTestChange.c
@state_changes._StateChange.declare_states(
state_changes._StateChangeStates.ANY, _NO_CHANGE
)
def do_nothing(self):
pass
m = Machine()
m.do_nothing()
eq_(m._state, _NO_CHANGE)
m.move_to_c()
eq_(m._state, StateTestChange.c)
def test_change_from_sub_method_requires_declaration(self):
"""A method can't call another state-changing method without using
_expect_state() to allow the state change to occur.
"""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def _inner_move_to_b(self):
self._state = StateTestChange.b
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
self._inner_move_to_b()
m = Machine()
with expect_raises_message(
sa_exc.IllegalStateChangeError,
r"Method '_inner_move_to_b\(\)' can't be called here; "
r"method 'move_to_b\(\)' is already in progress and this would "
r"cause an unexpected state change to <StateTestChange.b: 2>",
):
m.move_to_b()
| 35.536023 | 77 | 0.607656 | 1,361 | 12,331 | 5.115356 | 0.098457 | 0.067797 | 0.115628 | 0.0948 | 0.834243 | 0.827923 | 0.797472 | 0.768026 | 0.757254 | 0.740305 | 0 | 0.001297 | 0.312465 | 12,331 | 346 | 78 | 35.638728 | 0.819887 | 0.086692 | 0 | 0.694656 | 0 | 0 | 0.083176 | 0.007922 | 0 | 0 | 0 | 0 | 0.003817 | 1 | 0.137405 | false | 0.015267 | 0.019084 | 0 | 0.225191 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e26452fe4d62f2442629588e7aeb574618c3bc8c | 46 | py | Python | examples/restAPI/my_project/config/settings/local.py | emilioag/django_rest_coreapi_schema | 9b95f01311f0ba3f936762ba19c96c8a94f1f91f | [
"MIT"
] | null | null | null | examples/restAPI/my_project/config/settings/local.py | emilioag/django_rest_coreapi_schema | 9b95f01311f0ba3f936762ba19c96c8a94f1f91f | [
"MIT"
] | 3 | 2020-06-05T16:39:19.000Z | 2021-06-10T18:06:23.000Z | examples/restAPI/my_project/config/settings/local.py | emilioag/django_rest_coreapi_schema | 9b95f01311f0ba3f936762ba19c96c8a94f1f91f | [
"MIT"
] | null | null | null | from .base import *
print("Settings: LOCAL")
| 11.5 | 24 | 0.695652 | 6 | 46 | 5.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152174 | 46 | 3 | 25 | 15.333333 | 0.820513 | 0 | 0 | 0 | 0 | 0 | 0.326087 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
e29d9438f2046b1b8603b9ad5110651fb9a06d8a | 9,143 | py | Python | packages/task-scheduler/task_scheduler/routes/tasks/test_tasks.py | baviera08/romi-dashboard | ac3a15014ad3c3bdac523a6550934a06653cfba1 | [
"Apache-2.0"
] | null | null | null | packages/task-scheduler/task_scheduler/routes/tasks/test_tasks.py | baviera08/romi-dashboard | ac3a15014ad3c3bdac523a6550934a06653cfba1 | [
"Apache-2.0"
] | 1 | 2020-12-01T20:25:32.000Z | 2020-12-01T20:25:32.000Z | packages/task-scheduler/task_scheduler/routes/tasks/test_tasks.py | baviera08/romi-dashboard | ac3a15014ad3c3bdac523a6550934a06653cfba1 | [
"Apache-2.0"
] | null | null | null | # import asyncio
# import concurrent.futures
# from rmf_task_msgs.msg import TaskSummary as RmfTaskSummary
# from rmf_task_msgs.msg import TaskType as RmfTaskType
# from rmf_task_msgs.srv import CancelTask as RmfCancelTask
# from rmf_task_msgs.srv import SubmitTask as RmfSubmitTask
# from ...models import CancelTask, CleanTaskDescription, SubmitTask, TaskSummary
# from ...models import tortoise_models as ttm
# from ..test_fixtures import RouteFixture
# class TestTasksRoute(RouteFixture):
# def test_submit_task_request(self):
# # create a submit task request message
# task = SubmitTask(
# task_type=RmfTaskType.TYPE_CLEAN,
# start_time=0,
# description=CleanTaskDescription(cleaning_zone="zone_2"),
# priority=0,
# )
# fut = self.host_service_one(
# RmfSubmitTask, "submit_task", RmfSubmitTask.Response(success=True)
# )
# resp = self.session.post(f"{self.base_url}/tasks/submit_task", data=task.json())
# self.assertEqual(resp.status_code, 200)
# ros_received: RmfSubmitTask.Request = fut.result(3)
# self.assertEqual(ros_received.requester, "rmf_server")
# def test_cancel_task_request(self):
# cancel_task = CancelTask(task_id="test_task")
# fut = self.host_service_one(
# RmfCancelTask, "cancel_task", RmfCancelTask.Response(success=True)
# )
# resp = self.session.post(
# f"{self.base_url}/tasks/cancel_task", data=cancel_task.json()
# )
# self.assertEqual(resp.status_code, 200)
# received: RmfCancelTask.Request = fut.result(3)
# self.assertEqual(received.task_id, "test_task")
# def test_cancel_task_failure(self):
# cancel_task = CancelTask(task_id="test_task")
# fut = self.host_service_one(
# RmfCancelTask,
# "cancel_task",
# RmfCancelTask.Response(success=False, message="test error"),
# )
# resp = self.session.post(
# f"{self.base_url}/tasks/cancel_task", data=cancel_task.json()
# )
# self.assertEqual(resp.status_code, 500)
# fut.result(3)
# self.assertEqual(resp.json()["detail"], "test error")
# def test_query_tasks(self):
# dataset = [
# TaskSummary(
# task_id="task_1",
# fleet_name="fleet_1",
# submission_time={"sec": 1000, "nanosec": 0},
# start_time={"sec": 2000, "nanosec": 0},
# end_time={"sec": 3000, "nanosec": 0},
# robot_name="robot_1",
# state=RmfTaskSummary.STATE_COMPLETED,
# task_profile={
# "description": {
# "task_type": {"type": RmfTaskType.TYPE_LOOP},
# "priority": {"value": 0},
# }
# },
# ),
# TaskSummary(
# task_id="task_2",
# fleet_name="fleet_2",
# submission_time={"sec": 4000, "nanosec": 0},
# start_time={"sec": 5000, "nanosec": 0},
# end_time={"sec": 6000, "nanosec": 0},
# robot_name="robot_2",
# state=RmfTaskSummary.STATE_ACTIVE,
# task_profile={
# "description": {
# "task_type": {"type": RmfTaskType.TYPE_DELIVERY},
# "priority": {"value": 1},
# }
# },
# ),
# ]
# fut = concurrent.futures.Future()
# async def save_data():
# fut.set_result(
# await asyncio.gather(
# *(ttm.TaskSummary.save_pydantic(data) for data in dataset)
# )
# )
# self.server.app.wait_ready()
# self.server.app.loop.create_task(save_data())
# fut.result()
# resp = self.session.get(f"{self.base_url}/tasks?task_id=task_1,task_2")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 2)
# resp = self.session.get(f"{self.base_url}/tasks?fleet_name=fleet_1")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_1")
# self.assertEqual(items[0]["task_summary"]["fleet_name"], "fleet_1")
# resp = self.session.get(f"{self.base_url}/tasks?robot_name=robot_1")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_1")
# self.assertEqual(items[0]["task_summary"]["robot_name"], "robot_1")
# resp = self.session.get(f"{self.base_url}/tasks?state=completed")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_1")
# self.assertEqual(
# items[0]["task_summary"]["state"], RmfTaskSummary.STATE_COMPLETED
# )
# resp = self.session.get(f"{self.base_url}/tasks?task_type=loop")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_1")
# self.assertEqual(
# items[0]["task_summary"]["task_profile"]["description"]["task_type"][
# "type"
# ],
# RmfTaskType.TYPE_LOOP,
# )
# resp = self.session.get(f"{self.base_url}/tasks?priority=0")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_1")
# resp = self.session.get(f"{self.base_url}/tasks?submission_time_since=4000")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_2")
# resp = self.session.get(f"{self.base_url}/tasks?start_time_since=5000")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_2")
# resp = self.session.get(f"{self.base_url}/tasks?end_time_since=6000")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_2")
# # test no match
# resp = self.session.get(
# f"{self.base_url}/tasks?fleet_name=fleet_1&start_time_since=5000"
# )
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 0)
# # no query returns everything
# resp = self.session.get(f"{self.base_url}/tasks")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 2)
# def test_get_task_summary(self):
# dataset = [
# TaskSummary(
# task_id="task_1",
# fleet_name="fleet_1",
# submission_time={"sec": 1000, "nanosec": 0},
# start_time={"sec": 2000, "nanosec": 0},
# end_time={"sec": 3000, "nanosec": 0},
# robot_name="robot_1",
# state=RmfTaskSummary.STATE_COMPLETED,
# task_profile={
# "description": {
# "task_type": {"type": RmfTaskType.TYPE_LOOP},
# "priority": {"value": 0},
# }
# },
# ),
# ]
# fut = concurrent.futures.Future()
# async def save_data():
# fut.set_result(
# await asyncio.gather(
# *(ttm.TaskSummary.save_pydantic(data) for data in dataset)
# )
# )
# self.server.app.wait_ready()
# self.server.app.loop.create_task(save_data())
# fut.result()
# resp = self.session.get(f"{self.base_url}/tasks/task_1/summary")
# self.assertEqual(200, resp.status_code)
# resp_json = resp.json()
# self.assertEqual("task_1", resp_json["task_id"])
| 39.925764 | 90 | 0.552116 | 998 | 9,143 | 4.841683 | 0.122244 | 0.130381 | 0.059189 | 0.037252 | 0.762003 | 0.740066 | 0.706954 | 0.706954 | 0.687914 | 0.662459 | 0 | 0.025858 | 0.302089 | 9,143 | 228 | 91 | 40.100877 | 0.73139 | 0.952532 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e2b581f33f97ed0ca005f6c01b0c8ee8c995b201 | 10,030 | py | Python | zerver/webhooks/cofazure/view.py | CatarinaSMorais/zulip | e943d717b84291397328bd4dc578c04eed21885e | [
"Apache-2.0"
] | 1 | 2021-08-10T07:31:27.000Z | 2021-08-10T07:31:27.000Z | zerver/webhooks/cofazure/view.py | CatarinaSMorais/zulip | e943d717b84291397328bd4dc578c04eed21885e | [
"Apache-2.0"
] | 1 | 2021-08-05T14:46:02.000Z | 2021-08-05T14:46:02.000Z | zerver/webhooks/cofazure/view.py | CatarinaSMorais/zulip | e943d717b84291397328bd4dc578c04eed21885e | [
"Apache-2.0"
] | 1 | 2021-08-05T14:27:13.000Z | 2021-08-05T14:27:13.000Z | # Webhooks for external integrations.
from typing import Any, Dict, Iterable
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
from zerver.lib.webhooks.bodyfunctions import BuildandRelease as br
from zerver.lib.webhooks.bodyfunctions import code as cd
from zerver.lib.webhooks.bodyfunctions import pipelines as pl
from zerver.lib.webhooks.bodyfunctions import workitems as wk
"""
Mapeamento do eventtype devolvido pelo GIT
A cada eventtype está a associada uma função que devolve o body e o topic da mensagem a publicar
As funcoes encontram-se num outro ficheiro chamado bodyfunctions
"""
EVENT_FUNCTION_MAPPER:Dict[str, Dict[str, Any]] ={
"ms.vss-release.deployment-started-event": {"Function":br.release_deployment_started_body,
"Active":True},
"build.complete": {"Function": br.build_completed_body,
"Active": True},
"ms.vss-release.release-abandoned-event": {"Function": br.release_abandoned_body,
"Active": True},
"ms.vss-release.release-created-event": {"Function": br.release_created_body,
"Active": True},
"ms.vss-release.deployment-approval-completed-event": {"Function": br.release_deployment_approval_completed_body,
"Active": True},
"ms.vss-release.deployment-approval-pending-event": {"Function": br.release_deployment_approval_pending_body,
"Active": True},
"git.pullrequest.merged": {"Function": cd.pull_request_merged_body,
"Active": True},
"git.pullrequest.updated": {"Function": cd.pull_request_updated_body,
"Active": True},
"tfvc.checkin": {"Function": cd.checkin_body,
"Active": True},
"git.push": {"Function": cd.push_body,
"Active": True},
"git.pullrequest.created": {"Function": cd.pull_request_created_body,
"Active": True},
"ms.vss-pipelines.run-state-changed-event": {"Function": pl.run_state_changed_body,
"Active": True},
"ms.vss-pipelinechecks-events.approval-completed": {"Function": pl.run_stage_approval_completed_body,
"Active": True},
"ms.vss-pipelinechecks-events.approval-pending": {"Function": pl.run_stage_waiting_for_approval_body,
"Active": True},
"ms.vss-pipelines.stage-state-changed-event": {"Function": pl.run_stage_state_changed_body,
"Active": True},
"workitem.restored": {"Function": wk.work_item_restored_body,
"Active": True},
"workitem.updated": {"Function": wk.work_item_updated_body,
"Active": True},
"workitem.commented": {"Function": wk.work_item_commented_body,
"Active": True},
"workitem.created": {"Function": wk.work_item_created_body,
"Active": True},
"workitem.deleted": {"Function": wk.work_item_deleted_body,
"Active": True}
}
@webhook_view('CofAzure')
@has_request_variables
def api_cofazure_webhook(
    request: HttpRequest,
    user_profile: UserProfile,
    payload: Dict[str, Iterable[Dict[str, Any]]]=REQ(argument_type='body'),
) -> HttpResponse:
    """Forward an Azure DevOps webhook event to Zulip.

    Reads the ``eventType`` field of the JSON payload, checks that the
    event is configured in ``EVENT_FUNCTION_MAPPER``, and posts a message
    whose topic is the event type and whose body contains the payload's
    ``detailedMessage.text``.
    """
    # .get() so a payload without "eventType" is acknowledged instead of
    # raising KeyError back at the sender.
    event = payload.get('eventType')
    if event is None:
        # Helper.log_unsupported(event)
        return json_success()
    # Retira a função a executar da lista configurada mais acima.
    # Bug fix: the mapper stores handlers under the key "Function"; the
    # original code looked up "Functions" and raised KeyError on every
    # request.  Unknown/unsupported events are now acknowledged silently
    # instead of crashing with KeyError as well.
    mapping = EVENT_FUNCTION_MAPPER.get(event)
    if mapping is None or mapping.get('Function') is None:
        return json_success()
    # construct the body of the message
    body_template = 'Nova mensagem do azure : {detailedMessage}'
    body = body_template.format(detailedMessage=payload['detailedMessage']['text'])
    topic = payload['eventType']
    # send the message
    check_send_webhook_message(request, user_profile, topic, body)
    return json_success()
"""
from typing import Any, Callable, Dict, Iterable
from django.http import HttpRequest, HttpResponse
from zerver.decorator import log_exception_to_webhook_logger, webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
from zerver.lib.webhooks.bodyfunctions import BuildandRelease as br
from zerver.lib.webhooks.bodyfunctions import code as cd
from zerver.lib.webhooks.bodyfunctions import pipelines as pl
from zerver.lib.webhooks.bodyfunctions import workitems as wk
class Helper:
def __init__(
self,
payload: Dict[str, Iterable[Dict[str, Any]]],
include_title: bool,
) -> None:
self.payload = payload
self.include_title = include_title
def log_unsupported(self, event: str) -> None:
summary = f"The '{event}' event isn't currently supported by the cofazure webhook"
log_exception_to_webhook_logger(
summary=summary,
unsupported_event=True,
)
"""
"""
Mapeamento do eventtype devolvido pelo GIT
A cada eventtype está a associada uma função que devolve o body e o topic da mensagem a publicar
As funcoes encontram-se num outro ficheiro chamado bodyfunctions
"""
"""
EVENT_FUNCTION_MAPPER:Dict[str, Dict[str, Any]] ={
"ms.vss-release.deployment-started-event": {"Function":br.release_deployment_started_body,
"Active":True},
"build.complete": {"Function": br.build_completed_body,
"Active": True},
"ms.vss-release.release-abandoned-event": {"Function": br.release_abandoned_body,
"Active": True},
"ms.vss-release.release-created-event": {"Function": br.release_created_body,
"Active": True},
"ms.vss-release.deployment-approval-completed-event": {"Function": br.release_deployment_approval_completed_body,
"Active": True},
"ms.vss-release.deployment-approval-pending-event": {"Function": br.release_deployment_approval_pending_body,
"Active": True},
"git.pullrequest.merged": {"Function": cd.pull_request_merged_body,
"Active": True},
"git.pullrequest.updated": {"Function": cd.pull_request_updated_body,
"Active": True},
"tfvc.checkin": {"Function": cd.checkin_body,
"Active": True},
"git.push": {"Function": cd.push_body,
"Active": True},
"git.pullrequest.created": {"Function": cd.pull_request_created_body,
"Active": True},
"ms.vss-pipelines.run-state-changed-event": {"Function": pl.run_state_changed_body,
"Active": True},
"ms.vss-pipelinechecks-events.approval-completed": {"Function": pl.run_stage_approval_completed_body,
"Active": True},
"ms.vss-pipelinechecks-events.approval-pending": {"Function": pl.run_stage_waiting_for_approval_body,
"Active": True},
"ms.vss-pipelines.stage-state-changed-event": {"Function": pl.run_stage_state_changed_body,
"Active": True},
"workitem.restored": {"Function": wk.work_item_restored_body,
"Active": True},
"workitem.updated": {"Function": wk.work_item_updated_body,
"Active": True},
"workitem.commented": {"Function": wk.work_item_commented_body,
"Active": True},
"workitem.created": {"Function": wk.work_item_created_body,
"Active": True},
"workitem.deleted": {"Function": wk.work_item_deleted_body,
"Active": True},
}
@webhook_view('cofazure')
@has_request_variables
def api_cofazure_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: Dict[str, Iterable[Dict[str, Any]]] = REQ(argument_type='body'),
) -> HttpResponse:
# Retira o json que vem no body o valor do atributo eventType
try:
event = payload["eventType"]
# Valida se o evento vem preenchido
if event is None:
# Helper.log_unsupported(event)
return json_success()
# Retira a função a executrar da lista configurada mais acima
body_function = EVENT_FUNCTION_MAPPER[event]["Functions"]
# Valida se existe função para o evento pretendido
if body_function is None:
# Helper.log_unsupported(event)
return json_success()
function_state = EVENT_FUNCTION_MAPPER[event]["Active"]
if function_state == False:
return json_success()
# cria o objecto para passar para a função e atribui á variavel payload o conteudo do json do GIT
helper = Helper(payload=payload,
include_title="",
)
# executa a função para obter o topic e o body
topic, body = body_function(helper)
# publica na stream uma mensagem com o topic e o body obtidos
check_send_webhook_message(request, user_profile, topic, body)
return json_success()
except:
return json_success()
"""
| 45.590909 | 117 | 0.615354 | 1,088 | 10,030 | 5.488971 | 0.171875 | 0.066979 | 0.093771 | 0.042867 | 0.834896 | 0.821835 | 0.821835 | 0.816477 | 0.816477 | 0.808439 | 0 | 0 | 0.282951 | 10,030 | 219 | 118 | 45.799087 | 0.830367 | 0.021934 | 0 | 0.3 | 0 | 0 | 0.23451 | 0.111824 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014286 | false | 0 | 0.157143 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
2c8231bfc1a04590d8a38e2d4a7095b5409d69c8 | 156 | py | Python | macOS/Cut.py | bliles/autokey-macos | c8d44b83f6d4117b9430ede0c0aec81f72d9feab | [
"MIT"
] | 38 | 2019-04-06T01:20:26.000Z | 2022-02-22T03:02:40.000Z | macOS/Cut.py | bliles/autokey-macos | c8d44b83f6d4117b9430ede0c0aec81f72d9feab | [
"MIT"
] | null | null | null | macOS/Cut.py | bliles/autokey-macos | c8d44b83f6d4117b9430ede0c0aec81f72d9feab | [
"MIT"
# AutoKey "Cut" action.
#
# GNOME Terminal intercepts the plain-Ctrl shortcuts, so when it is the
# active window we send its Ctrl+Shift+C binding instead (presumably the
# closest available action there -- confirm against AutoKey usage).
terminal_wm_class = 'gnome-terminal-server.Gnome-terminal'
if window.get_active_class() == terminal_wm_class:
    keyboard.send_keys("<ctrl>+<shift>+c")
else:
    keyboard.send_keys("<ctrl>+x")
| 31.2 | 71 | 0.698718 | 22 | 156 | 4.772727 | 0.727273 | 0.247619 | 0.304762 | 0.380952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.096154 | 156 | 4 | 72 | 39 | 0.744681 | 0 | 0 | 0 | 0 | 0 | 0.384615 | 0.230769 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
2c93c382e0baa98d3b3365c60b22b56a464cb1af | 191,137 | py | Python | sdk/python/pulumi_kubernetes_ingress_nginx/_inputs.py | joeduffy/pulumi-kubernetes-ingress-nginx | efc5b9b67efa2c4348869e3038c3c3725ef28915 | [
"Apache-2.0"
] | 5 | 2021-11-16T18:59:37.000Z | 2022-03-28T07:44:12.000Z | sdk/python/pulumi_kubernetes_ingress_nginx/_inputs.py | joeduffy/pulumi-kubernetes-ingress-nginx | efc5b9b67efa2c4348869e3038c3c3725ef28915 | [
"Apache-2.0"
] | 2 | 2021-12-07T08:40:42.000Z | 2021-12-22T13:00:27.000Z | sdk/python/pulumi_kubernetes_ingress_nginx/_inputs.py | joeduffy/pulumi-kubernetes-ingress-nginx | efc5b9b67efa2c4348869e3038c3c3725ef28915 | [
"Apache-2.0"
] | 1 | 2022-03-18T13:37:08.000Z | 2022-03-18T13:37:08.000Z | # coding=utf-8
# *** WARNING: this file was generated by Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
import pulumi_kubernetes
# Public API of this generated module.  NOTE(review): the misspelled names
# 'ContollerAdmissionWebhooksArgs' and
# 'ControllerAdmissionWebhooksPatchWebhbookJobArgs' come from the SDK
# generator and are part of the published interface -- do not rename here.
__all__ = [
    'AutoscalingBehaviorScalingPolicyArgs',
    'AutoscalingBehaviorScalingArgs',
    'AutoscalingBehaviorArgs',
    'AutoscalingTemplatePodsMetricArgs',
    'AutoscalingTemplatePodsTargetArgs',
    'AutoscalingTemplatePodsArgs',
    'AutoscalingTemplateArgs',
    'AutoscalingArgs',
    'ContollerAdmissionWebhooksArgs',
    'ControllerAdmissionWebhooksCreateSecretJobArgs',
    'ControllerAdmissionWebhooksPatchWebhbookJobArgs',
    'ControllerAdmissionWebhooksPatchArgs',
    'ControllerAdmissionWebhooksServiceArgs',
    'ControllerCustomTemplateArgs',
    'ControllerDefaultBackendServiceArgs',
    'ControllerDefaultBackendArgs',
    'ControllerHostPortPortsArgs',
    'ControllerHostPortArgs',
    'ControllerImageArgs',
    'ControllerIngressClassResourceArgs',
    'ControllerMetricsPrometheusRulesArgs',
    'ControllerMetricsServiceMonitorArgs',
    'ControllerMetricsServiceArgs',
    'ControllerMetricsArgs',
    'ControllerPodSecurityPolicyArgs',
    'ControllerPortArgs',
    'ControllerPublishServiceArgs',
    'ControllerRBACArgs',
    'ControllerRollingUpdateArgs',
    'ControllerScopeArgs',
    'ControllerServiceAccountArgs',
    'ControllerServiceInternalArgs',
    'ControllerServiceNodePortsArgs',
    'ControllerServiceArgs',
    'ControllerTcpArgs',
    'ControllerUdpArgs',
    'ControllerUpdateStrategyArgs',
    'ControllerArgs',
    'KedaScaledObjectArgs',
    'KedaTriggerArgs',
    'KedaArgs',
    'ReleaseArgs',
    'RepositoryOptsArgs',
]
@pulumi.input_type
class AutoscalingBehaviorScalingPolicyArgs:
    """Generated input type for a single autoscaling behavior scaling policy
    (period_seconds / type / value)."""
    def __init__(__self__, *,
                 period_seconds: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[int]] = None):
        if period_seconds is not None:
            pulumi.set(__self__, "period_seconds", period_seconds)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter(name="periodSeconds")
    def period_seconds(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "period_seconds")
    @period_seconds.setter
    def period_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period_seconds", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class AutoscalingBehaviorScalingArgs:
    """Generated input type for one direction of autoscaling behavior
    (a list of policies plus a stabilization window)."""
    def __init__(__self__, *,
                 policies: Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingBehaviorScalingPolicyArgs']]]] = None,
                 stabilization_window_seconds: Optional[pulumi.Input[int]] = None):
        if policies is not None:
            pulumi.set(__self__, "policies", policies)
        if stabilization_window_seconds is not None:
            pulumi.set(__self__, "stabilization_window_seconds", stabilization_window_seconds)
    @property
    @pulumi.getter
    def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingBehaviorScalingPolicyArgs']]]]:
        return pulumi.get(self, "policies")
    @policies.setter
    def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingBehaviorScalingPolicyArgs']]]]):
        pulumi.set(self, "policies", value)
    @property
    @pulumi.getter(name="stabilizationWindowSeconds")
    def stabilization_window_seconds(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "stabilization_window_seconds")
    @stabilization_window_seconds.setter
    def stabilization_window_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "stabilization_window_seconds", value)
@pulumi.input_type
class AutoscalingBehaviorArgs:
    """Generated input type pairing scale-down and scale-up behavior."""
    def __init__(__self__, *,
                 scale_down: Optional[pulumi.Input['AutoscalingBehaviorScalingArgs']] = None,
                 scale_up: Optional[pulumi.Input['AutoscalingBehaviorScalingArgs']] = None):
        if scale_down is not None:
            pulumi.set(__self__, "scale_down", scale_down)
        if scale_up is not None:
            pulumi.set(__self__, "scale_up", scale_up)
    @property
    @pulumi.getter(name="scaleDown")
    def scale_down(self) -> Optional[pulumi.Input['AutoscalingBehaviorScalingArgs']]:
        return pulumi.get(self, "scale_down")
    @scale_down.setter
    def scale_down(self, value: Optional[pulumi.Input['AutoscalingBehaviorScalingArgs']]):
        pulumi.set(self, "scale_down", value)
    @property
    @pulumi.getter(name="scaleUp")
    def scale_up(self) -> Optional[pulumi.Input['AutoscalingBehaviorScalingArgs']]:
        return pulumi.get(self, "scale_up")
    @scale_up.setter
    def scale_up(self, value: Optional[pulumi.Input['AutoscalingBehaviorScalingArgs']]):
        pulumi.set(self, "scale_up", value)
@pulumi.input_type
class AutoscalingTemplatePodsMetricArgs:
    """Generated input type naming a pods metric for the autoscaling template."""
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None):
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class AutoscalingTemplatePodsTargetArgs:
    """Generated input type for a pods metric target (average_value / type)."""
    def __init__(__self__, *,
                 average_value: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        if average_value is not None:
            pulumi.set(__self__, "average_value", average_value)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="averageValue")
    def average_value(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "average_value")
    @average_value.setter
    def average_value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "average_value", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class AutoscalingTemplatePodsArgs:
    """Generated input type pairing a pods metric with its target."""
    def __init__(__self__, *,
                 metric: Optional[pulumi.Input['AutoscalingTemplatePodsMetricArgs']] = None,
                 target: Optional[pulumi.Input['AutoscalingTemplatePodsTargetArgs']] = None):
        if metric is not None:
            pulumi.set(__self__, "metric", metric)
        if target is not None:
            pulumi.set(__self__, "target", target)
    @property
    @pulumi.getter
    def metric(self) -> Optional[pulumi.Input['AutoscalingTemplatePodsMetricArgs']]:
        return pulumi.get(self, "metric")
    @metric.setter
    def metric(self, value: Optional[pulumi.Input['AutoscalingTemplatePodsMetricArgs']]):
        pulumi.set(self, "metric", value)
    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input['AutoscalingTemplatePodsTargetArgs']]:
        return pulumi.get(self, "target")
    @target.setter
    def target(self, value: Optional[pulumi.Input['AutoscalingTemplatePodsTargetArgs']]):
        pulumi.set(self, "target", value)
@pulumi.input_type
class AutoscalingTemplateArgs:
    """Generated input type for one autoscaling template entry (pods / type)."""
    def __init__(__self__, *,
                 pods: Optional[pulumi.Input['AutoscalingTemplatePodsArgs']] = None,
                 type: Optional[pulumi.Input[str]] = None):
        if pods is not None:
            pulumi.set(__self__, "pods", pods)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def pods(self) -> Optional[pulumi.Input['AutoscalingTemplatePodsArgs']]:
        return pulumi.get(self, "pods")
    @pods.setter
    def pods(self, value: Optional[pulumi.Input['AutoscalingTemplatePodsArgs']]):
        pulumi.set(self, "pods", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class AutoscalingArgs:
    """Generated input type for the controller's autoscaling configuration
    (replica bounds, target utilization percentages, and scaling behavior)."""
    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 controller_autoscaling_behavior: Optional[pulumi.Input['AutoscalingBehaviorArgs']] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 max_replicas: Optional[pulumi.Input[int]] = None,
                 min_replicas: Optional[pulumi.Input[int]] = None,
                 target_cpu_utilization_percentage: Optional[pulumi.Input[int]] = None,
                 target_memory_utilization_percentage: Optional[pulumi.Input[int]] = None):
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if controller_autoscaling_behavior is not None:
            pulumi.set(__self__, "controller_autoscaling_behavior", controller_autoscaling_behavior)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if max_replicas is not None:
            pulumi.set(__self__, "max_replicas", max_replicas)
        if min_replicas is not None:
            pulumi.set(__self__, "min_replicas", min_replicas)
        if target_cpu_utilization_percentage is not None:
            pulumi.set(__self__, "target_cpu_utilization_percentage", target_cpu_utilization_percentage)
        if target_memory_utilization_percentage is not None:
            pulumi.set(__self__, "target_memory_utilization_percentage", target_memory_utilization_percentage)
    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        return pulumi.get(self, "annotations")
    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "annotations", value)
    @property
    @pulumi.getter(name="controllerAutoscalingBehavior")
    def controller_autoscaling_behavior(self) -> Optional[pulumi.Input['AutoscalingBehaviorArgs']]:
        return pulumi.get(self, "controller_autoscaling_behavior")
    @controller_autoscaling_behavior.setter
    def controller_autoscaling_behavior(self, value: Optional[pulumi.Input['AutoscalingBehaviorArgs']]):
        pulumi.set(self, "controller_autoscaling_behavior", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="maxReplicas")
    def max_replicas(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "max_replicas")
    @max_replicas.setter
    def max_replicas(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_replicas", value)
    @property
    @pulumi.getter(name="minReplicas")
    def min_replicas(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "min_replicas")
    @min_replicas.setter
    def min_replicas(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_replicas", value)
    @property
    @pulumi.getter(name="targetCPUUtilizationPercentage")
    def target_cpu_utilization_percentage(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "target_cpu_utilization_percentage")
    @target_cpu_utilization_percentage.setter
    def target_cpu_utilization_percentage(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "target_cpu_utilization_percentage", value)
    @property
    @pulumi.getter(name="targetMemoryUtilizationPercentage")
    def target_memory_utilization_percentage(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "target_memory_utilization_percentage")
    @target_memory_utilization_percentage.setter
    def target_memory_utilization_percentage(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "target_memory_utilization_percentage", value)
@pulumi.input_type
class ContollerAdmissionWebhooksArgs:
    """Generated input type for the controller's admission-webhook settings.

    NOTE(review): "Contoller" (missing 'r') is the generator's spelling and is
    exported via __all__ -- do not rename by hand.
    """
    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 certificate: Optional[pulumi.Input[str]] = None,
                 create_secret_job: Optional[pulumi.Input['ControllerAdmissionWebhooksCreateSecretJobArgs']] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 existing_psp: Optional[pulumi.Input[str]] = None,
                 failure_policy: Optional[pulumi.Input[str]] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 namespace_selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 object_selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 patch: Optional[pulumi.Input['ControllerAdmissionWebhooksPatchArgs']] = None,
                 patch_webhook_job: Optional[pulumi.Input['ControllerAdmissionWebhooksPatchWebhbookJobArgs']] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 service: Optional[pulumi.Input['ControllerAdmissionWebhooksServiceArgs']] = None,
                 timeout_seconds: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[str] existing_psp: Use an existing PSP instead of creating one.
        """
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if certificate is not None:
            pulumi.set(__self__, "certificate", certificate)
        if create_secret_job is not None:
            pulumi.set(__self__, "create_secret_job", create_secret_job)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if existing_psp is not None:
            pulumi.set(__self__, "existing_psp", existing_psp)
        if failure_policy is not None:
            pulumi.set(__self__, "failure_policy", failure_policy)
        if key is not None:
            pulumi.set(__self__, "key", key)
        if namespace_selector is not None:
            pulumi.set(__self__, "namespace_selector", namespace_selector)
        if object_selector is not None:
            pulumi.set(__self__, "object_selector", object_selector)
        if patch is not None:
            pulumi.set(__self__, "patch", patch)
        if patch_webhook_job is not None:
            pulumi.set(__self__, "patch_webhook_job", patch_webhook_job)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if service is not None:
            pulumi.set(__self__, "service", service)
        if timeout_seconds is not None:
            pulumi.set(__self__, "timeout_seconds", timeout_seconds)
    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "annotations")
    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "annotations", value)
    @property
    @pulumi.getter
    def certificate(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "certificate")
    @certificate.setter
    def certificate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "certificate", value)
    @property
    @pulumi.getter(name="createSecretJob")
    def create_secret_job(self) -> Optional[pulumi.Input['ControllerAdmissionWebhooksCreateSecretJobArgs']]:
        return pulumi.get(self, "create_secret_job")
    @create_secret_job.setter
    def create_secret_job(self, value: Optional[pulumi.Input['ControllerAdmissionWebhooksCreateSecretJobArgs']]):
        pulumi.set(self, "create_secret_job", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="existingPsp")
    def existing_psp(self) -> Optional[pulumi.Input[str]]:
        """
        Use an existing PSP instead of creating one.
        """
        return pulumi.get(self, "existing_psp")
    @existing_psp.setter
    def existing_psp(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "existing_psp", value)
    @property
    @pulumi.getter(name="failurePolicy")
    def failure_policy(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "failure_policy")
    @failure_policy.setter
    def failure_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "failure_policy", value)
    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter(name="namespaceSelector")
    def namespace_selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "namespace_selector")
    @namespace_selector.setter
    def namespace_selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "namespace_selector", value)
    @property
    @pulumi.getter(name="objectSelector")
    def object_selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "object_selector")
    @object_selector.setter
    def object_selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "object_selector", value)
    @property
    @pulumi.getter
    def patch(self) -> Optional[pulumi.Input['ControllerAdmissionWebhooksPatchArgs']]:
        return pulumi.get(self, "patch")
    @patch.setter
    def patch(self, value: Optional[pulumi.Input['ControllerAdmissionWebhooksPatchArgs']]):
        pulumi.set(self, "patch", value)
    @property
    @pulumi.getter(name="patchWebhookJob")
    def patch_webhook_job(self) -> Optional[pulumi.Input['ControllerAdmissionWebhooksPatchWebhbookJobArgs']]:
        return pulumi.get(self, "patch_webhook_job")
    @patch_webhook_job.setter
    def patch_webhook_job(self, value: Optional[pulumi.Input['ControllerAdmissionWebhooksPatchWebhbookJobArgs']]):
        pulumi.set(self, "patch_webhook_job", value)
    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input['ControllerAdmissionWebhooksServiceArgs']]:
        return pulumi.get(self, "service")
    @service.setter
    def service(self, value: Optional[pulumi.Input['ControllerAdmissionWebhooksServiceArgs']]):
        pulumi.set(self, "service", value)
    @property
    @pulumi.getter(name="timeoutSeconds")
    def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "timeout_seconds")
    @timeout_seconds.setter
    def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class ControllerAdmissionWebhooksCreateSecretJobArgs:
    """Generated input type: resource requirements for the create-secret job."""
    def __init__(__self__, *,
                 resources: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']] = None):
        if resources is not None:
            pulumi.set(__self__, "resources", resources)
    @property
    @pulumi.getter
    def resources(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]:
        return pulumi.get(self, "resources")
    @resources.setter
    def resources(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]):
        pulumi.set(self, "resources", value)
@pulumi.input_type
class ControllerAdmissionWebhooksPatchWebhbookJobArgs:
    """Generated input type: resource requirements for the patch-webhook job.

    NOTE(review): "Webhbook" is the generator's spelling and part of the
    exported API -- do not rename by hand.
    """
    def __init__(__self__, *,
                 resources: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']] = None):
        if resources is not None:
            pulumi.set(__self__, "resources", resources)
    @property
    @pulumi.getter
    def resources(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]:
        return pulumi.get(self, "resources")
    @resources.setter
    def resources(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]):
        pulumi.set(self, "resources", value)
@pulumi.input_type
class ControllerAdmissionWebhooksPatchArgs:
def __init__(__self__, *,
enabled: Optional[pulumi.Input[bool]] = None,
image: Optional[pulumi.Input['ControllerImageArgs']] = None,
node_selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
pod_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
priority_class_name: Optional[pulumi.Input[str]] = None,
run_as_user: Optional[pulumi.Input[int]] = None,
tolerations: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]] = None):
"""
:param pulumi.Input[str] priority_class_name: Provide a priority class name to the webhook patching job.
"""
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if image is not None:
pulumi.set(__self__, "image", image)
if node_selector is not None:
pulumi.set(__self__, "node_selector", node_selector)
if pod_annotations is not None:
pulumi.set(__self__, "pod_annotations", pod_annotations)
if priority_class_name is not None:
pulumi.set(__self__, "priority_class_name", priority_class_name)
if run_as_user is not None:
pulumi.set(__self__, "run_as_user", run_as_user)
if tolerations is not None:
pulumi.set(__self__, "tolerations", tolerations)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def image(self) -> Optional[pulumi.Input['ControllerImageArgs']]:
return pulumi.get(self, "image")
@image.setter
def image(self, value: Optional[pulumi.Input['ControllerImageArgs']]):
pulumi.set(self, "image", value)
@property
@pulumi.getter(name="nodeSelector")
def node_selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "node_selector")
@node_selector.setter
def node_selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "node_selector", value)
@property
@pulumi.getter(name="podAnnotations")
def pod_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
return pulumi.get(self, "pod_annotations")
@pod_annotations.setter
def pod_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
pulumi.set(self, "pod_annotations", value)
@property
@pulumi.getter(name="priorityClassName")
def priority_class_name(self) -> Optional[pulumi.Input[str]]:
"""
Provide a priority class name to the webhook patching job.
"""
return pulumi.get(self, "priority_class_name")
@priority_class_name.setter
def priority_class_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "priority_class_name", value)
    # Maps to the chart value ``runAsUser``.
    @property
    @pulumi.getter(name="runAsUser")
    def run_as_user(self) -> Optional[pulumi.Input[int]]:
        """Return the ``runAsUser`` input, if one was set."""
        return pulumi.get(self, "run_as_user")

    @run_as_user.setter
    def run_as_user(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "run_as_user", value)
    @property
    @pulumi.getter
    def tolerations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]]:
        """Return the ``tolerations`` input, if one was set."""
        return pulumi.get(self, "tolerations")

    @tolerations.setter
    def tolerations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]]):
        pulumi.set(self, "tolerations", value)
@pulumi.input_type
class ControllerAdmissionWebhooksServiceArgs:
    """Service-related inputs for the admission webhooks component."""

    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 cluster_ip: Optional[pulumi.Input[str]] = None,
                 external_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 load_balancer_ips: Optional[pulumi.Input[str]] = None,
                 load_balancer_source_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 service_port: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        # Record only arguments that were explicitly supplied; a None value
        # means "not set" and is deliberately never stored.
        supplied = (
            ("annotations", annotations),
            ("cluster_ip", cluster_ip),
            ("external_ips", external_ips),
            ("load_balancer_ips", load_balancer_ips),
            ("load_balancer_source_ranges", load_balancer_source_ranges),
            ("service_port", service_port),
            ("type", type),
        )
        for key, val in supplied:
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """The ``annotations`` input, if set."""
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter(name="clusterIP")
    def cluster_ip(self) -> Optional[pulumi.Input[str]]:
        """The ``clusterIP`` input, if set."""
        return pulumi.get(self, "cluster_ip")

    @cluster_ip.setter
    def cluster_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_ip", value)

    @property
    @pulumi.getter(name="externalIPs")
    def external_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """The ``externalIPs`` input, if set."""
        return pulumi.get(self, "external_ips")

    @external_ips.setter
    def external_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "external_ips", value)

    @property
    @pulumi.getter(name="loadBalancerIPs")
    def load_balancer_ips(self) -> Optional[pulumi.Input[str]]:
        """The ``loadBalancerIPs`` input, if set."""
        # NOTE(review): plural chart key but a single-str type — comes straight
        # from the generated schema; confirm before changing.
        return pulumi.get(self, "load_balancer_ips")

    @load_balancer_ips.setter
    def load_balancer_ips(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "load_balancer_ips", value)

    @property
    @pulumi.getter(name="loadBalancerSourceRanges")
    def load_balancer_source_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """The ``loadBalancerSourceRanges`` input, if set."""
        return pulumi.get(self, "load_balancer_source_ranges")

    @load_balancer_source_ranges.setter
    def load_balancer_source_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "load_balancer_source_ranges", value)

    @property
    @pulumi.getter(name="servicePort")
    def service_port(self) -> Optional[pulumi.Input[int]]:
        """The ``servicePort`` input, if set."""
        return pulumi.get(self, "service_port")

    @service_port.setter
    def service_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "service_port", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """The ``type`` input, if set."""
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ControllerCustomTemplateArgs:
    """Identifies a custom template by ConfigMap name and key."""

    def __init__(__self__, *,
                 config_map_key: Optional[pulumi.Input[str]] = None,
                 config_map_name: Optional[pulumi.Input[str]] = None):
        # Only explicitly supplied (non-None) arguments are recorded.
        for key, val in (("config_map_key", config_map_key),
                         ("config_map_name", config_map_name)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="configMapKey")
    def config_map_key(self) -> Optional[pulumi.Input[str]]:
        """The ``configMapKey`` input, if set."""
        return pulumi.get(self, "config_map_key")

    @config_map_key.setter
    def config_map_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "config_map_key", value)

    @property
    @pulumi.getter(name="configMapName")
    def config_map_name(self) -> Optional[pulumi.Input[str]]:
        """The ``configMapName`` input, if set."""
        return pulumi.get(self, "config_map_name")

    @config_map_name.setter
    def config_map_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "config_map_name", value)
@pulumi.input_type
class ControllerDefaultBackendServiceArgs:
    """Service-related inputs for the default backend."""

    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 cluster_ip: Optional[pulumi.Input[str]] = None,
                 external_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 load_balancer_ip: Optional[pulumi.Input[str]] = None,
                 load_balancer_source_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 service_port: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input[str]]] external_ips: List of IP addresses at which the default backend service is available. Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
        """
        # Record only arguments that were explicitly supplied; None means "unset".
        supplied = (
            ("annotations", annotations),
            ("cluster_ip", cluster_ip),
            ("external_ips", external_ips),
            ("load_balancer_ip", load_balancer_ip),
            ("load_balancer_source_ranges", load_balancer_source_ranges),
            ("service_port", service_port),
            ("type", type),
        )
        for key, val in supplied:
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """The ``annotations`` input, if set."""
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter(name="clusterIP")
    def cluster_ip(self) -> Optional[pulumi.Input[str]]:
        """The ``clusterIP`` input, if set."""
        return pulumi.get(self, "cluster_ip")

    @cluster_ip.setter
    def cluster_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_ip", value)

    @property
    @pulumi.getter(name="externalIPs")
    def external_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of IP addresses at which the default backend service is available. Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
        """
        return pulumi.get(self, "external_ips")

    @external_ips.setter
    def external_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "external_ips", value)

    @property
    @pulumi.getter(name="loadBalancerIP")
    def load_balancer_ip(self) -> Optional[pulumi.Input[str]]:
        """The ``loadBalancerIP`` input, if set."""
        return pulumi.get(self, "load_balancer_ip")

    @load_balancer_ip.setter
    def load_balancer_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "load_balancer_ip", value)

    @property
    @pulumi.getter(name="loadBalancerSourceRanges")
    def load_balancer_source_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """The ``loadBalancerSourceRanges`` input, if set."""
        return pulumi.get(self, "load_balancer_source_ranges")

    @load_balancer_source_ranges.setter
    def load_balancer_source_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "load_balancer_source_ranges", value)

    @property
    @pulumi.getter(name="servicePort")
    def service_port(self) -> Optional[pulumi.Input[int]]:
        """The ``servicePort`` input, if set."""
        return pulumi.get(self, "service_port")

    @service_port.setter
    def service_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "service_port", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """The ``type`` input, if set."""
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ControllerDefaultBackendArgs:
    """Inputs for the default backend deployment."""

    def __init__(__self__, *,
                 affinity: Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']] = None,
                 autoscaling: Optional[pulumi.Input['AutoscalingArgs']] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 existing_psp: Optional[pulumi.Input[str]] = None,
                 extra_args: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 extra_envs: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]] = None,
                 extra_volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]] = None,
                 extra_volumes: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]] = None,
                 image: Optional[pulumi.Input['ControllerImageArgs']] = None,
                 liveness_probe: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']] = None,
                 min_available: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 node_selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 pod_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 pod_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 pod_security_context: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 priority_class_name: Optional[pulumi.Input[str]] = None,
                 readiness_probe: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']] = None,
                 replica_count: Optional[pulumi.Input[int]] = None,
                 resources: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']] = None,
                 service: Optional[pulumi.Input['ControllerDefaultBackendServiceArgs']] = None,
                 service_account: Optional[pulumi.Input['ControllerServiceAccountArgs']] = None,
                 tolerations: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]] = None):
        """
        :param pulumi.Input[str] existing_psp: Use an existing PSP instead of creating one.
        :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]] extra_volume_mounts: Additional volumeMounts to the default backend container. - name: copy-portal-skins mountPath: /var/lib/lemonldap-ng/portal/skins
        :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]] extra_volumes: Additional volumes to the default backend pod. - name: copy-portal-skins emptyDir: {}
        :param pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs'] liveness_probe: Liveness probe values for default backend. Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] node_selector: Node labels for default backend pod assignment Ref: https://kubernetes.io/docs/user-guide/node-selection/.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pod_annotations: Annotations to be added to default backend pods.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pod_labels: labels to add to the pod container metadata
        :param pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs'] pod_security_context: Security Context policies for controller pods. See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls.
        :param pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs'] readiness_probe: Readiness probe values for default backend. Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]] tolerations: Node tolerations for server scheduling to nodes with taints. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
        """
        # Record only arguments that were explicitly supplied; None means "unset".
        supplied = (
            ("affinity", affinity),
            ("autoscaling", autoscaling),
            ("enabled", enabled),
            ("existing_psp", existing_psp),
            ("extra_args", extra_args),
            ("extra_envs", extra_envs),
            ("extra_volume_mounts", extra_volume_mounts),
            ("extra_volumes", extra_volumes),
            ("image", image),
            ("liveness_probe", liveness_probe),
            ("min_available", min_available),
            ("name", name),
            ("node_selector", node_selector),
            ("pod_annotations", pod_annotations),
            ("pod_labels", pod_labels),
            ("pod_security_context", pod_security_context),
            ("port", port),
            ("priority_class_name", priority_class_name),
            ("readiness_probe", readiness_probe),
            ("replica_count", replica_count),
            ("resources", resources),
            ("service", service),
            ("service_account", service_account),
            ("tolerations", tolerations),
        )
        for key, val in supplied:
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def affinity(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']]:
        """The ``affinity`` input, if set."""
        return pulumi.get(self, "affinity")

    @affinity.setter
    def affinity(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']]):
        pulumi.set(self, "affinity", value)

    @property
    @pulumi.getter
    def autoscaling(self) -> Optional[pulumi.Input['AutoscalingArgs']]:
        """The ``autoscaling`` input, if set."""
        return pulumi.get(self, "autoscaling")

    @autoscaling.setter
    def autoscaling(self, value: Optional[pulumi.Input['AutoscalingArgs']]):
        pulumi.set(self, "autoscaling", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """The ``enabled`` input, if set."""
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="existingPsp")
    def existing_psp(self) -> Optional[pulumi.Input[str]]:
        """
        Use an existing PSP instead of creating one.
        """
        return pulumi.get(self, "existing_psp")

    @existing_psp.setter
    def existing_psp(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "existing_psp", value)

    @property
    @pulumi.getter(name="extraArgs")
    def extra_args(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """The ``extraArgs`` input, if set."""
        return pulumi.get(self, "extra_args")

    @extra_args.setter
    def extra_args(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "extra_args", value)

    @property
    @pulumi.getter(name="extraEnvs")
    def extra_envs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]]:
        """The ``extraEnvs`` input, if set."""
        return pulumi.get(self, "extra_envs")

    @extra_envs.setter
    def extra_envs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]]):
        pulumi.set(self, "extra_envs", value)

    @property
    @pulumi.getter(name="extraVolumeMounts")
    def extra_volume_mounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]:
        """
        Additional volumeMounts to the default backend container. - name: copy-portal-skins mountPath: /var/lib/lemonldap-ng/portal/skins
        """
        return pulumi.get(self, "extra_volume_mounts")

    @extra_volume_mounts.setter
    def extra_volume_mounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]):
        pulumi.set(self, "extra_volume_mounts", value)

    @property
    @pulumi.getter(name="extraVolumes")
    def extra_volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]]:
        """
        Additional volumes to the default backend pod. - name: copy-portal-skins emptyDir: {}
        """
        return pulumi.get(self, "extra_volumes")

    @extra_volumes.setter
    def extra_volumes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]]):
        pulumi.set(self, "extra_volumes", value)

    @property
    @pulumi.getter
    def image(self) -> Optional[pulumi.Input['ControllerImageArgs']]:
        """The ``image`` input, if set."""
        return pulumi.get(self, "image")

    @image.setter
    def image(self, value: Optional[pulumi.Input['ControllerImageArgs']]):
        pulumi.set(self, "image", value)

    @property
    @pulumi.getter(name="livenessProbe")
    def liveness_probe(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]:
        """
        Liveness probe values for default backend. Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        """
        return pulumi.get(self, "liveness_probe")

    @liveness_probe.setter
    def liveness_probe(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]):
        pulumi.set(self, "liveness_probe", value)

    @property
    @pulumi.getter(name="minAvailable")
    def min_available(self) -> Optional[pulumi.Input[int]]:
        """The ``minAvailable`` input, if set."""
        return pulumi.get(self, "min_available")

    @min_available.setter
    def min_available(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_available", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """The ``name`` input, if set."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="nodeSelector")
    def node_selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Node labels for default backend pod assignment Ref: https://kubernetes.io/docs/user-guide/node-selection/.
        """
        return pulumi.get(self, "node_selector")

    @node_selector.setter
    def node_selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "node_selector", value)

    @property
    @pulumi.getter(name="podAnnotations")
    def pod_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Annotations to be added to default backend pods.
        """
        return pulumi.get(self, "pod_annotations")

    @pod_annotations.setter
    def pod_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "pod_annotations", value)

    @property
    @pulumi.getter(name="podLabels")
    def pod_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        labels to add to the pod container metadata
        """
        return pulumi.get(self, "pod_labels")

    @pod_labels.setter
    def pod_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "pod_labels", value)

    @property
    @pulumi.getter(name="podSecurityContext")
    def pod_security_context(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]:
        """
        Security Context policies for controller pods. See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls.
        """
        return pulumi.get(self, "pod_security_context")

    @pod_security_context.setter
    def pod_security_context(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]):
        pulumi.set(self, "pod_security_context", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """The ``port`` input, if set."""
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="priorityClassName")
    def priority_class_name(self) -> Optional[pulumi.Input[str]]:
        """The ``priorityClassName`` input, if set."""
        return pulumi.get(self, "priority_class_name")

    @priority_class_name.setter
    def priority_class_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "priority_class_name", value)

    @property
    @pulumi.getter(name="readinessProbe")
    def readiness_probe(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]:
        """
        Readiness probe values for default backend. Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        """
        return pulumi.get(self, "readiness_probe")

    @readiness_probe.setter
    def readiness_probe(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]):
        pulumi.set(self, "readiness_probe", value)

    @property
    @pulumi.getter(name="replicaCount")
    def replica_count(self) -> Optional[pulumi.Input[int]]:
        """The ``replicaCount`` input, if set."""
        return pulumi.get(self, "replica_count")

    @replica_count.setter
    def replica_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "replica_count", value)

    @property
    @pulumi.getter
    def resources(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]:
        """The ``resources`` input, if set."""
        return pulumi.get(self, "resources")

    @resources.setter
    def resources(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]):
        pulumi.set(self, "resources", value)

    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input['ControllerDefaultBackendServiceArgs']]:
        """The ``service`` input, if set."""
        return pulumi.get(self, "service")

    @service.setter
    def service(self, value: Optional[pulumi.Input['ControllerDefaultBackendServiceArgs']]):
        pulumi.set(self, "service", value)

    @property
    @pulumi.getter(name="serviceAccount")
    def service_account(self) -> Optional[pulumi.Input['ControllerServiceAccountArgs']]:
        """The ``serviceAccount`` input, if set."""
        return pulumi.get(self, "service_account")

    @service_account.setter
    def service_account(self, value: Optional[pulumi.Input['ControllerServiceAccountArgs']]):
        pulumi.set(self, "service_account", value)

    @property
    @pulumi.getter
    def tolerations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]]:
        """
        Node tolerations for server scheduling to nodes with taints. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
        """
        return pulumi.get(self, "tolerations")

    @tolerations.setter
    def tolerations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]]):
        pulumi.set(self, "tolerations", value)
@pulumi.input_type
class ControllerHostPortPortsArgs:
    """HTTP/HTTPS port numbers for the host-port configuration."""

    def __init__(__self__, *,
                 http: Optional[pulumi.Input[int]] = None,
                 https: Optional[pulumi.Input[int]] = None):
        # Only explicitly supplied (non-None) arguments are recorded.
        for key, val in (("http", http), ("https", https)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def http(self) -> Optional[pulumi.Input[int]]:
        """The ``http`` port input, if set."""
        return pulumi.get(self, "http")

    @http.setter
    def http(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "http", value)

    @property
    @pulumi.getter
    def https(self) -> Optional[pulumi.Input[int]]:
        """The ``https`` port input, if set."""
        return pulumi.get(self, "https")

    @https.setter
    def https(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "https", value)
@pulumi.input_type
class ControllerHostPortArgs:
    """Host-port configuration: an enable flag plus port numbers."""

    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 ports: Optional[pulumi.Input['ControllerHostPortPortsArgs']] = None):
        # Only explicitly supplied (non-None) arguments are recorded.
        for key, val in (("enabled", enabled), ("ports", ports)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """The ``enabled`` input, if set."""
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter
    def ports(self) -> Optional[pulumi.Input['ControllerHostPortPortsArgs']]:
        """The ``ports`` input (``ControllerHostPortPortsArgs``), if set."""
        return pulumi.get(self, "ports")

    @ports.setter
    def ports(self, value: Optional[pulumi.Input['ControllerHostPortPortsArgs']]):
        pulumi.set(self, "ports", value)
@pulumi.input_type
class ControllerImageArgs:
    """Container image settings (registry/repository/tag/digest plus security flags)."""

    def __init__(__self__, *,
                 allow_privilege_escalation: Optional[pulumi.Input[bool]] = None,
                 digest: Optional[pulumi.Input[str]] = None,
                 image: Optional[pulumi.Input[str]] = None,
                 pull_policy: Optional[pulumi.Input[str]] = None,
                 read_only_root_filesystem: Optional[pulumi.Input[bool]] = None,
                 registry: Optional[pulumi.Input[str]] = None,
                 repository: Optional[pulumi.Input[str]] = None,
                 run_as_non_root: Optional[pulumi.Input[bool]] = None,
                 run_as_user: Optional[pulumi.Input[str]] = None,
                 tag: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] repository: for backwards compatibility consider setting the full image url via the repository value below use *either* current default registry/image or repository format or installing will fail.
        """
        # Record only arguments that were explicitly supplied; None means "unset".
        supplied = (
            ("allow_privilege_escalation", allow_privilege_escalation),
            ("digest", digest),
            ("image", image),
            ("pull_policy", pull_policy),
            ("read_only_root_filesystem", read_only_root_filesystem),
            ("registry", registry),
            ("repository", repository),
            ("run_as_non_root", run_as_non_root),
            ("run_as_user", run_as_user),
            ("tag", tag),
        )
        for key, val in supplied:
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="allowPrivilegeEscalation")
    def allow_privilege_escalation(self) -> Optional[pulumi.Input[bool]]:
        """The ``allowPrivilegeEscalation`` input, if set."""
        return pulumi.get(self, "allow_privilege_escalation")

    @allow_privilege_escalation.setter
    def allow_privilege_escalation(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_privilege_escalation", value)

    @property
    @pulumi.getter
    def digest(self) -> Optional[pulumi.Input[str]]:
        """The ``digest`` input, if set."""
        return pulumi.get(self, "digest")

    @digest.setter
    def digest(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "digest", value)

    @property
    @pulumi.getter
    def image(self) -> Optional[pulumi.Input[str]]:
        """The ``image`` input, if set."""
        return pulumi.get(self, "image")

    @image.setter
    def image(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "image", value)

    @property
    @pulumi.getter(name="pullPolicy")
    def pull_policy(self) -> Optional[pulumi.Input[str]]:
        """The ``pullPolicy`` input, if set."""
        return pulumi.get(self, "pull_policy")

    @pull_policy.setter
    def pull_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pull_policy", value)

    @property
    @pulumi.getter(name="readOnlyRootFilesystem")
    def read_only_root_filesystem(self) -> Optional[pulumi.Input[bool]]:
        """The ``readOnlyRootFilesystem`` input, if set."""
        return pulumi.get(self, "read_only_root_filesystem")

    @read_only_root_filesystem.setter
    def read_only_root_filesystem(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "read_only_root_filesystem", value)

    @property
    @pulumi.getter
    def registry(self) -> Optional[pulumi.Input[str]]:
        """The ``registry`` input, if set."""
        return pulumi.get(self, "registry")

    @registry.setter
    def registry(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "registry", value)

    @property
    @pulumi.getter
    def repository(self) -> Optional[pulumi.Input[str]]:
        """
        for backwards compatibility consider setting the full image url via the repository value below use *either* current default registry/image or repository format or installing will fail.
        """
        return pulumi.get(self, "repository")

    @repository.setter
    def repository(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "repository", value)

    @property
    @pulumi.getter(name="runAsNonRoot")
    def run_as_non_root(self) -> Optional[pulumi.Input[bool]]:
        """The ``runAsNonRoot`` input, if set."""
        return pulumi.get(self, "run_as_non_root")

    @run_as_non_root.setter
    def run_as_non_root(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "run_as_non_root", value)

    @property
    @pulumi.getter(name="runAsUser")
    def run_as_user(self) -> Optional[pulumi.Input[str]]:
        """The ``runAsUser`` input, if set."""
        # NOTE(review): typed as str here (elsewhere runAsUser is int) — this
        # mirrors the generated schema; confirm before unifying.
        return pulumi.get(self, "run_as_user")

    @run_as_user.setter
    def run_as_user(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "run_as_user", value)

    @property
    @pulumi.getter
    def tag(self) -> Optional[pulumi.Input[str]]:
        """The ``tag`` input, if set."""
        return pulumi.get(self, "tag")

    @tag.setter
    def tag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tag", value)
@pulumi.input_type
class ControllerIngressClassResourceArgs:
    """Inputs for the IngressClass resource managed by the controller."""

    def __init__(__self__, *,
                 controller_value: Optional[pulumi.Input[str]] = None,
                 default: Optional[pulumi.Input[bool]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None):
        """
        :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] parameters: Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.
        """
        # Record only arguments that were explicitly supplied; None means "unset".
        supplied = (
            ("controller_value", controller_value),
            ("default", default),
            ("enabled", enabled),
            ("name", name),
            ("parameters", parameters),
        )
        for key, val in supplied:
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="controllerValue")
    def controller_value(self) -> Optional[pulumi.Input[str]]:
        """The ``controllerValue`` input, if set."""
        return pulumi.get(self, "controller_value")

    @controller_value.setter
    def controller_value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "controller_value", value)

    @property
    @pulumi.getter
    def default(self) -> Optional[pulumi.Input[bool]]:
        """The ``default`` input, if set."""
        return pulumi.get(self, "default")

    @default.setter
    def default(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "default", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """The ``enabled`` input, if set."""
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """The ``name`` input, if set."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "parameters", value)
@pulumi.input_type
class ControllerMetricsPrometheusRulesArgs:
    """Inputs for the PrometheusRule resource of the metrics component."""

    def __init__(__self__, *,
                 additional_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 namespace: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None):
        # Record only arguments that were explicitly supplied; None means "unset".
        supplied = (
            ("additional_labels", additional_labels),
            ("enabled", enabled),
            ("namespace", namespace),
            ("rules", rules),
        )
        for key, val in supplied:
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="additionalLabels")
    def additional_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """The ``additionalLabels`` input, if set."""
        return pulumi.get(self, "additional_labels")

    @additional_labels.setter
    def additional_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "additional_labels", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """The ``enabled`` input, if set."""
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        """The ``namespace`` input, if set."""
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter
    def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """The ``rules`` input, if set."""
        return pulumi.get(self, "rules")

    @rules.setter
    def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "rules", value)
@pulumi.input_type
class ControllerMetricsServiceMonitorArgs:
    def __init__(__self__, *,
                 additional_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 honor_labels: Optional[pulumi.Input[bool]] = None,
                 job_label: Optional[pulumi.Input[str]] = None,
                 metric_relabelings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 namespace: Optional[pulumi.Input[str]] = None,
                 namespace_selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 scrape_interval: Optional[pulumi.Input[str]] = None,
                 target_labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """ServiceMonitor settings for the controller's metrics (generated input type).

        :param pulumi.Input[str] job_label: The label to use to retrieve the job name from.
        """
        # Forward only explicitly supplied keyword arguments.
        provided = (
            ("additional_labels", additional_labels),
            ("enabled", enabled),
            ("honor_labels", honor_labels),
            ("job_label", job_label),
            ("metric_relabelings", metric_relabelings),
            ("namespace", namespace),
            ("namespace_selector", namespace_selector),
            ("scrape_interval", scrape_interval),
            ("target_labels", target_labels),
        )
        for key, arg in provided:
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="additionalLabels")
    def additional_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "additional_labels")

    @additional_labels.setter
    def additional_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "additional_labels", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="honorLabels")
    def honor_labels(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "honor_labels")

    @honor_labels.setter
    def honor_labels(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "honor_labels", value)

    @property
    @pulumi.getter(name="jobLabel")
    def job_label(self) -> Optional[pulumi.Input[str]]:
        """
        The label to use to retrieve the job name from.
        """
        return pulumi.get(self, "job_label")

    @job_label.setter
    def job_label(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "job_label", value)

    @property
    @pulumi.getter(name="metricRelabelings")
    def metric_relabelings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "metric_relabelings")

    @metric_relabelings.setter
    def metric_relabelings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "metric_relabelings", value)

    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter(name="namespaceSelector")
    def namespace_selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "namespace_selector")

    @namespace_selector.setter
    def namespace_selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "namespace_selector", value)

    @property
    @pulumi.getter(name="scrapeInterval")
    def scrape_interval(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "scrape_interval")

    @scrape_interval.setter
    def scrape_interval(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scrape_interval", value)

    @property
    @pulumi.getter(name="targetLabels")
    def target_labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "target_labels")

    @target_labels.setter
    def target_labels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "target_labels", value)
@pulumi.input_type
class ControllerMetricsServiceArgs:
    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 cluster_ip: Optional[pulumi.Input[str]] = None,
                 external_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 external_traffic_policy: Optional[pulumi.Input[str]] = None,
                 load_balancer_ips: Optional[pulumi.Input[str]] = None,
                 load_balancer_source_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 node_port: Optional[pulumi.Input[str]] = None,
                 service_port: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """Service settings for exposing the controller's metrics (generated input type)."""
        # Store each supplied argument; unset arguments are simply omitted.
        provided = (
            ("annotations", annotations),
            ("cluster_ip", cluster_ip),
            ("external_ips", external_ips),
            ("external_traffic_policy", external_traffic_policy),
            ("load_balancer_ips", load_balancer_ips),
            ("load_balancer_source_ranges", load_balancer_source_ranges),
            ("node_port", node_port),
            ("service_port", service_port),
            ("type", type),
        )
        for key, arg in provided:
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter(name="clusterIP")
    def cluster_ip(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "cluster_ip")

    @cluster_ip.setter
    def cluster_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_ip", value)

    @property
    @pulumi.getter(name="externalIPs")
    def external_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "external_ips")

    @external_ips.setter
    def external_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "external_ips", value)

    @property
    @pulumi.getter(name="externalTrafficPolicy")
    def external_traffic_policy(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "external_traffic_policy")

    @external_traffic_policy.setter
    def external_traffic_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "external_traffic_policy", value)

    @property
    @pulumi.getter(name="loadBalancerIPs")
    def load_balancer_ips(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "load_balancer_ips")

    @load_balancer_ips.setter
    def load_balancer_ips(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "load_balancer_ips", value)

    @property
    @pulumi.getter(name="loadBalancerSourceRanges")
    def load_balancer_source_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "load_balancer_source_ranges")

    @load_balancer_source_ranges.setter
    def load_balancer_source_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "load_balancer_source_ranges", value)

    @property
    @pulumi.getter(name="nodePort")
    def node_port(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "node_port")

    @node_port.setter
    def node_port(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "node_port", value)

    @property
    @pulumi.getter(name="servicePort")
    def service_port(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "service_port")

    @service_port.setter
    def service_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "service_port", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ControllerMetricsArgs:
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 prometheus_rule: Optional[pulumi.Input['ControllerMetricsPrometheusRulesArgs']] = None,
                 service: Optional[pulumi.Input['ControllerMetricsServiceArgs']] = None,
                 service_monitor: Optional[pulumi.Input['ControllerMetricsServiceMonitorArgs']] = None):
        """Metrics settings for the controller (generated input type).

        :param pulumi.Input[int] port: if this port is changed, change healthz-port: in extraArgs: accordingly.
        """
        # Persist only the arguments the caller provided.
        provided = (
            ("enabled", enabled),
            ("port", port),
            ("prometheus_rule", prometheus_rule),
            ("service", service),
            ("service_monitor", service_monitor),
        )
        for key, arg in provided:
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        if this port is changed, change healthz-port: in extraArgs: accordingly.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="prometheusRule")
    def prometheus_rule(self) -> Optional[pulumi.Input['ControllerMetricsPrometheusRulesArgs']]:
        return pulumi.get(self, "prometheus_rule")

    @prometheus_rule.setter
    def prometheus_rule(self, value: Optional[pulumi.Input['ControllerMetricsPrometheusRulesArgs']]):
        pulumi.set(self, "prometheus_rule", value)

    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input['ControllerMetricsServiceArgs']]:
        return pulumi.get(self, "service")

    @service.setter
    def service(self, value: Optional[pulumi.Input['ControllerMetricsServiceArgs']]):
        pulumi.set(self, "service", value)

    @property
    @pulumi.getter(name="serviceMonitor")
    def service_monitor(self) -> Optional[pulumi.Input['ControllerMetricsServiceMonitorArgs']]:
        return pulumi.get(self, "service_monitor")

    @service_monitor.setter
    def service_monitor(self, value: Optional[pulumi.Input['ControllerMetricsServiceMonitorArgs']]):
        pulumi.set(self, "service_monitor", value)
@pulumi.input_type
class ControllerPodSecurityPolicyArgs:
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None):
        """PodSecurityPolicy toggle for the controller (generated input type)."""
        # An omitted flag stays unset rather than being stored as None.
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class ControllerPortArgs:
    def __init__(__self__, *,
                 http: Optional[pulumi.Input[int]] = None,
                 https: Optional[pulumi.Input[int]] = None):
        """HTTP/HTTPS port pair for the controller (generated input type)."""
        for key, arg in (("http", http), ("https", https)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def http(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "http")

    @http.setter
    def http(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "http", value)

    @property
    @pulumi.getter
    def https(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "https")

    @https.setter
    def https(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "https", value)
@pulumi.input_type
class ControllerPublishServiceArgs:
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 path_override: Optional[pulumi.Input[str]] = None):
        """Publish-service settings for the controller (generated input type).

        :param pulumi.Input[str] path_override: Allows overriding of the publish service to bind to. Must be <namespace>/<service_name>.
        """
        for key, arg in (("enabled", enabled), ("path_override", path_override)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="pathOverride")
    def path_override(self) -> Optional[pulumi.Input[str]]:
        """
        Allows overriding of the publish service to bind to. Must be <namespace>/<service_name>.
        """
        return pulumi.get(self, "path_override")

    @path_override.setter
    def path_override(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path_override", value)
@pulumi.input_type
class ControllerRBACArgs:
    def __init__(__self__, *,
                 create: Optional[pulumi.Input[bool]] = None,
                 scope: Optional[pulumi.Input[bool]] = None):
        """RBAC settings for the controller (generated input type)."""
        for key, arg in (("create", create), ("scope", scope)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def create(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "create")

    @create.setter
    def create(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "create", value)

    @property
    @pulumi.getter
    def scope(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "scope")

    @scope.setter
    def scope(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "scope", value)
@pulumi.input_type
class ControllerRollingUpdateArgs:
    def __init__(__self__, *,
                 max_unavailable: Optional[pulumi.Input[int]] = None):
        """Rolling-update parameters for the controller (generated input type)."""
        # Skip storage entirely when the caller gave no value.
        if max_unavailable is not None:
            pulumi.set(__self__, "max_unavailable", max_unavailable)

    @property
    @pulumi.getter(name="maxUnavailable")
    def max_unavailable(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "max_unavailable")

    @max_unavailable.setter
    def max_unavailable(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_unavailable", value)
@pulumi.input_type
class ControllerScopeArgs:
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 namespace: Optional[pulumi.Input[str]] = None):
        """Namespace-scoping settings for the controller (generated input type)."""
        for key, arg in (("enabled", enabled), ("namespace", namespace)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)
@pulumi.input_type
class ControllerServiceAccountArgs:
    def __init__(__self__, *,
                 automount_service_account_token: Optional[pulumi.Input[bool]] = None,
                 create: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """ServiceAccount settings for the controller (generated input type)."""
        provided = (
            ("automount_service_account_token", automount_service_account_token),
            ("create", create),
            ("name", name),
        )
        for key, arg in provided:
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="automountServiceAccountToken")
    def automount_service_account_token(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "automount_service_account_token")

    @automount_service_account_token.setter
    def automount_service_account_token(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "automount_service_account_token", value)

    @property
    @pulumi.getter
    def create(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "create")

    @create.setter
    def create(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "create", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class ControllerServiceInternalArgs:
    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 external_traffic_policy: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 load_balancer_ips: Optional[pulumi.Input[str]] = None,
                 load_balancer_source_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """Internal load-balancer service settings for the controller (generated input type).

        :param pulumi.Input[str] external_traffic_policy: Set external traffic policy to: "Local" to preserve source IP on providers supporting it. Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
        :param pulumi.Input[Sequence[pulumi.Input[str]]] load_balancer_source_ranges: Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0.
        """
        provided = (
            ("annotations", annotations),
            ("enabled", enabled),
            ("external_traffic_policy", external_traffic_policy),
            ("labels", labels),
            ("load_balancer_ips", load_balancer_ips),
            ("load_balancer_source_ranges", load_balancer_source_ranges),
        )
        for key, arg in provided:
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="externalTrafficPolicy")
    def external_traffic_policy(self) -> Optional[pulumi.Input[str]]:
        """
        Set external traffic policy to: "Local" to preserve source IP on providers supporting it. Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
        """
        return pulumi.get(self, "external_traffic_policy")

    @external_traffic_policy.setter
    def external_traffic_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "external_traffic_policy", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter(name="loadBalancerIPs")
    def load_balancer_ips(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "load_balancer_ips")

    @load_balancer_ips.setter
    def load_balancer_ips(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "load_balancer_ips", value)

    @property
    @pulumi.getter(name="loadBalancerSourceRanges")
    def load_balancer_source_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0.
        """
        return pulumi.get(self, "load_balancer_source_ranges")

    @load_balancer_source_ranges.setter
    def load_balancer_source_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "load_balancer_source_ranges", value)
@pulumi.input_type
class ControllerServiceNodePortsArgs:
    def __init__(__self__, *,
                 http: Optional[pulumi.Input[str]] = None,
                 https: Optional[pulumi.Input[str]] = None,
                 tcp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 udp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None):
        """NodePort assignments for the controller service (generated input type)."""
        provided = (("http", http), ("https", https), ("tcp", tcp), ("udp", udp))
        for key, arg in provided:
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def http(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "http")

    @http.setter
    def http(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "http", value)

    @property
    @pulumi.getter
    def https(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "https")

    @https.setter
    def https(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "https", value)

    @property
    @pulumi.getter
    def tcp(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "tcp")

    @tcp.setter
    def tcp(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "tcp", value)

    @property
    @pulumi.getter
    def udp(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "udp")

    @udp.setter
    def udp(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "udp", value)
@pulumi.input_type
class ControllerServiceArgs:
    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 cluster_ip: Optional[pulumi.Input[str]] = None,
                 enable_http: Optional[pulumi.Input[bool]] = None,
                 enable_https: Optional[pulumi.Input[bool]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 external_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 external_traffic_policy: Optional[pulumi.Input[str]] = None,
                 health_check_node_port: Optional[pulumi.Input[int]] = None,
                 internal: Optional[pulumi.Input['ControllerServiceInternalArgs']] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 load_balancer_ips: Optional[pulumi.Input[str]] = None,
                 load_balancer_source_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 node_ports: Optional[pulumi.Input['ControllerServiceNodePortsArgs']] = None,
                 ports: Optional[pulumi.Input['ControllerPortArgs']] = None,
                 session_affinity: Optional[pulumi.Input[str]] = None,
                 target_ports: Optional[pulumi.Input['ControllerPortArgs']] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """Service settings for the controller (generated input type).

        :param pulumi.Input[Sequence[pulumi.Input[str]]] external_ips: List of IP addresses at which the controller services are available Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
        :param pulumi.Input[str] external_traffic_policy: Set external traffic policy to: "Local" to preserve source IP on providers supporting it. Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
        :param pulumi.Input[int] health_check_node_port: specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, the service controller allocates a port from your cluster’s NodePort range. Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
        :param pulumi.Input['ControllerServiceInternalArgs'] internal: Enables an additional internal load balancer (besides the external one). Annotations are mandatory for the load balancer to come up. Varies with the cloud service.
        :param pulumi.Input[str] session_affinity: Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
        """
        # Store only explicitly supplied arguments; omitted ones remain unset.
        provided = (
            ("annotations", annotations),
            ("cluster_ip", cluster_ip),
            ("enable_http", enable_http),
            ("enable_https", enable_https),
            ("enabled", enabled),
            ("external_ips", external_ips),
            ("external_traffic_policy", external_traffic_policy),
            ("health_check_node_port", health_check_node_port),
            ("internal", internal),
            ("labels", labels),
            ("load_balancer_ips", load_balancer_ips),
            ("load_balancer_source_ranges", load_balancer_source_ranges),
            ("node_ports", node_ports),
            ("ports", ports),
            ("session_affinity", session_affinity),
            ("target_ports", target_ports),
            ("type", type),
        )
        for key, arg in provided:
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter(name="clusterIP")
    def cluster_ip(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "cluster_ip")

    @cluster_ip.setter
    def cluster_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_ip", value)

    @property
    @pulumi.getter(name="enableHttp")
    def enable_http(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enable_http")

    @enable_http.setter
    def enable_http(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_http", value)

    @property
    @pulumi.getter(name="enableHttps")
    def enable_https(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enable_https")

    @enable_https.setter
    def enable_https(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_https", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="externalIPs")
    def external_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of IP addresses at which the controller services are available Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
        """
        return pulumi.get(self, "external_ips")

    @external_ips.setter
    def external_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "external_ips", value)

    @property
    @pulumi.getter(name="externalTrafficPolicy")
    def external_traffic_policy(self) -> Optional[pulumi.Input[str]]:
        """
        Set external traffic policy to: "Local" to preserve source IP on providers supporting it. Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
        """
        return pulumi.get(self, "external_traffic_policy")

    @external_traffic_policy.setter
    def external_traffic_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "external_traffic_policy", value)

    @property
    @pulumi.getter(name="healthCheckNodePort")
    def health_check_node_port(self) -> Optional[pulumi.Input[int]]:
        """
        specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, the service controller allocates a port from your cluster’s NodePort range. Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
        """
        return pulumi.get(self, "health_check_node_port")

    @health_check_node_port.setter
    def health_check_node_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "health_check_node_port", value)

    @property
    @pulumi.getter
    def internal(self) -> Optional[pulumi.Input['ControllerServiceInternalArgs']]:
        """
        Enables an additional internal load balancer (besides the external one). Annotations are mandatory for the load balancer to come up. Varies with the cloud service.
        """
        return pulumi.get(self, "internal")

    @internal.setter
    def internal(self, value: Optional[pulumi.Input['ControllerServiceInternalArgs']]):
        pulumi.set(self, "internal", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter(name="loadBalancerIPs")
    def load_balancer_ips(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "load_balancer_ips")

    @load_balancer_ips.setter
    def load_balancer_ips(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "load_balancer_ips", value)

    @property
    @pulumi.getter(name="loadBalancerSourceRanges")
    def load_balancer_source_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "load_balancer_source_ranges")

    @load_balancer_source_ranges.setter
    def load_balancer_source_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "load_balancer_source_ranges", value)

    @property
    @pulumi.getter(name="nodePorts")
    def node_ports(self) -> Optional[pulumi.Input['ControllerServiceNodePortsArgs']]:
        return pulumi.get(self, "node_ports")

    @node_ports.setter
    def node_ports(self, value: Optional[pulumi.Input['ControllerServiceNodePortsArgs']]):
        pulumi.set(self, "node_ports", value)

    @property
    @pulumi.getter
    def ports(self) -> Optional[pulumi.Input['ControllerPortArgs']]:
        return pulumi.get(self, "ports")

    @ports.setter
    def ports(self, value: Optional[pulumi.Input['ControllerPortArgs']]):
        pulumi.set(self, "ports", value)

    @property
    @pulumi.getter(name="sessionAffinity")
    def session_affinity(self) -> Optional[pulumi.Input[str]]:
        """
        Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
        """
        return pulumi.get(self, "session_affinity")

    @session_affinity.setter
    def session_affinity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "session_affinity", value)

    @property
    @pulumi.getter(name="targetPorts")
    def target_ports(self) -> Optional[pulumi.Input['ControllerPortArgs']]:
        return pulumi.get(self, "target_ports")

    @target_ports.setter
    def target_ports(self, value: Optional[pulumi.Input['ControllerPortArgs']]):
        pulumi.set(self, "target_ports", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ControllerTcpArgs:
    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 config_map_namespace: Optional[pulumi.Input[str]] = None):
        """TCP configmap settings for the controller (generated input type).

        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Annotations to be added to the tcp config configmap.
        """
        for key, arg in (("annotations", annotations),
                         ("config_map_namespace", config_map_namespace)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Annotations to be added to the tcp config configmap.
        """
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter(name="configMapNamespace")
    def config_map_namespace(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "config_map_namespace")

    @config_map_namespace.setter
    def config_map_namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "config_map_namespace", value)
@pulumi.input_type
class ControllerUdpArgs:
    """Allows customization of the udp config configmap."""

    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 config_map_namespace: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Annotations to be added to the udp config configmap.
        :param pulumi.Input[str] config_map_namespace: Namespace of the udp config configmap — NOTE(review): not described in the upstream schema; confirm against the chart values.
        """
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if config_map_namespace is not None:
            pulumi.set(__self__, "config_map_namespace", config_map_namespace)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Annotations to be added to the udp config configmap.
        """
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        """Set ``annotations``."""
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter(name="configMapNamespace")
    def config_map_namespace(self) -> Optional[pulumi.Input[str]]:
        """Namespace of the udp config configmap — NOTE(review): not described in the upstream schema."""
        return pulumi.get(self, "config_map_namespace")

    @config_map_namespace.setter
    def config_map_namespace(self, value: Optional[pulumi.Input[str]]):
        """Set ``config_map_namespace``."""
        pulumi.set(self, "config_map_namespace", value)
@pulumi.input_type
class ControllerUpdateStrategyArgs:
    """The update strategy to apply to the Deployment or DaemonSet."""

    def __init__(__self__, *,
                 rolling_update: Optional[pulumi.Input['ControllerRollingUpdateArgs']] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input['ControllerRollingUpdateArgs'] rolling_update: Rolling-update parameters — see ``ControllerRollingUpdateArgs``.
        :param pulumi.Input[str] type: The update strategy type (no description in the upstream schema).
        """
        if rolling_update is not None:
            pulumi.set(__self__, "rolling_update", rolling_update)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="rollingUpdate")
    def rolling_update(self) -> Optional[pulumi.Input['ControllerRollingUpdateArgs']]:
        """Rolling-update parameters — see ``ControllerRollingUpdateArgs``."""
        return pulumi.get(self, "rolling_update")

    @rolling_update.setter
    def rolling_update(self, value: Optional[pulumi.Input['ControllerRollingUpdateArgs']]):
        """Set ``rolling_update``."""
        pulumi.set(self, "rolling_update", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """The update strategy type (no description in the upstream schema)."""
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        """Set ``type``."""
        pulumi.set(self, "type", value)
@pulumi.input_type
class ControllerArgs:
def __init__(__self__, *,
             add_headers: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
             admission_webhooks: Optional[pulumi.Input['ContollerAdmissionWebhooksArgs']] = None,
             affinity: Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']] = None,
             allow_snippet_annotations: Optional[pulumi.Input[bool]] = None,
             annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             autoscaling: Optional[pulumi.Input['AutoscalingArgs']] = None,
             autoscaling_template: Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingTemplateArgs']]]] = None,
             config: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
             config_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
             config_map_namespace: Optional[pulumi.Input[str]] = None,
             container_name: Optional[pulumi.Input[str]] = None,
             container_port: Optional[pulumi.Input['ControllerPortArgs']] = None,
             custom_template: Optional[pulumi.Input['ControllerCustomTemplateArgs']] = None,
             dns_config: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
             dns_policy: Optional[pulumi.Input[str]] = None,
             election_id: Optional[pulumi.Input[str]] = None,
             enable_mimalloc: Optional[pulumi.Input[bool]] = None,
             existing_psp: Optional[pulumi.Input[str]] = None,
             extra_args: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
             extra_containers: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]]] = None,
             extra_envs: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]] = None,
             extra_init_containers: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]]] = None,
             extra_volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]] = None,
             extra_volumes: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]] = None,
             health_check_path: Optional[pulumi.Input[str]] = None,
             heath_check_host: Optional[pulumi.Input[str]] = None,
             host_network: Optional[pulumi.Input[bool]] = None,
             host_port: Optional[pulumi.Input['ControllerHostPortArgs']] = None,
             hostname: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
             image: Optional[pulumi.Input['ControllerImageArgs']] = None,
             ingress_class_by_name: Optional[pulumi.Input[bool]] = None,
             ingress_class_resource: Optional[pulumi.Input['ControllerIngressClassResourceArgs']] = None,
             keda: Optional[pulumi.Input['KedaArgs']] = None,
             kind: Optional[pulumi.Input[str]] = None,
             lifecycle: Optional[pulumi.Input['pulumi_kubernetes.core.v1.LifecycleArgs']] = None,
             liveness_probe: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']] = None,
             maxmind_license_key: Optional[pulumi.Input[str]] = None,
             metrics: Optional[pulumi.Input['ControllerMetricsArgs']] = None,
             min_available: Optional[pulumi.Input[int]] = None,
             min_ready_seconds: Optional[pulumi.Input[int]] = None,
             name: Optional[pulumi.Input[str]] = None,
             node_selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             pod_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             pod_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
             pod_security_context: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']] = None,
             priority_class_name: Optional[pulumi.Input[str]] = None,
             proxy_set_headers: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
             publish_service: Optional[pulumi.Input['ControllerPublishServiceArgs']] = None,
             readiness_probe: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']] = None,
             replica_count: Optional[pulumi.Input[int]] = None,
             report_node_internal_ip: Optional[pulumi.Input[bool]] = None,
             resources: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']] = None,
             scope: Optional[pulumi.Input['ControllerScopeArgs']] = None,
             service: Optional[pulumi.Input['ControllerServiceArgs']] = None,
             startup_probe: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']] = None,
             sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
             tcp: Optional[pulumi.Input['ControllerTcpArgs']] = None,
             terminate_grace_period_seconds: Optional[pulumi.Input[int]] = None,
             tolerations: Optional[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']] = None,
             topology_spread_constraints: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TopologySpreadConstraintArgs']]]] = None,
             udp: Optional[pulumi.Input['ControllerUdpArgs']] = None,
             update_strategy: Optional[pulumi.Input['ControllerUpdateStrategyArgs']] = None,
             watch_ingress_without_class: Optional[pulumi.Input[bool]] = None):
    """
    Configuration of the ingress-nginx controller.

    All arguments are optional; only the ones that are not ``None`` are recorded
    on the instance via ``pulumi.set``.

    :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] add_headers: Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers.
    :param pulumi.Input['ContollerAdmissionWebhooksArgs'] admission_webhooks: Admission webhooks configuration — see ``ContollerAdmissionWebhooksArgs`` (note: the 'Contoller' spelling comes from the upstream type name).
    :param pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs'] affinity: Affinity and anti-affinity Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity.
    :param pulumi.Input[bool] allow_snippet_annotations: This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Annotations to be added to the controller Deployment or DaemonSet.
    :param pulumi.Input['AutoscalingArgs'] autoscaling: Mutually exclusive with keda autoscaling.
    :param pulumi.Input[Sequence[pulumi.Input['AutoscalingTemplateArgs']]] autoscaling_template: Custom or additional autoscaling metrics ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
    :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] config: Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/.
    :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] config_annotations: Annotations to be added to the controller config configuration configmap.
    :param pulumi.Input[str] config_map_namespace: Allows customization of the configmap / nginx-configmap namespace.
    :param pulumi.Input[str] container_name: Configures the controller container name.
    :param pulumi.Input['ControllerPortArgs'] container_port: Configures the ports the nginx-controller listens on.
    :param pulumi.Input['ControllerCustomTemplateArgs'] custom_template: Override NGINX template.
    :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] dns_config: Optionally customize the pod dnsConfig.
    :param pulumi.Input[str] dns_policy: Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
    :param pulumi.Input[str] election_id: Election ID to use for status update.
    :param pulumi.Input[bool] enable_mimalloc: Enable mimalloc as a drop-in replacement for malloc. ref: https://github.com/microsoft/mimalloc.
    :param pulumi.Input[str] existing_psp: Use an existing PSP instead of creating one.
    :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] extra_args: Additional command line arguments to pass to nginx-ingress-controller E.g. to specify the default SSL certificate you can use `default-ssl-certificate: "<namespace>/<secret_name>"`.
    :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]] extra_containers: Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
    :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]] extra_envs: Additional environment variables to set.
    :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]] extra_init_containers: Containers, which are run before the app containers are started. - name: init-myservice image: busybox command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
    :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]] extra_volume_mounts: Additional volumeMounts to the controller main container. - name: copy-portal-skins mountPath: /var/lib/lemonldap-ng/portal/skins
    :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]] extra_volumes: Additional volumes to the controller pod. - name: copy-portal-skins emptyDir: {}
    :param pulumi.Input[str] health_check_path: Path of the health check endpoint. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path.
    :param pulumi.Input[str] heath_check_host: Address to bind the health check endpoint. It is better to set this option to the internal node address if the ingress nginx controller is running in the hostNetwork: true mode. (Note: the ``heath_check_host`` spelling comes from the upstream schema.)
    :param pulumi.Input[bool] host_network: Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged.
    :param pulumi.Input['ControllerHostPortArgs'] host_port: Use host ports 80 and 443. Disabled by default.
    :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] hostname: Optionally customize the pod hostname.
    :param pulumi.Input['ControllerImageArgs'] image: Controller container image settings — see ``ControllerImageArgs``.
    :param pulumi.Input[bool] ingress_class_by_name: Process IngressClass per name (additionally as per spec.controller).
    :param pulumi.Input['ControllerIngressClassResourceArgs'] ingress_class_resource: This section refers to the creation of the IngressClass resource. IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19
    :param pulumi.Input['KedaArgs'] keda: Mutually exclusive with hpa autoscaling.
    :param pulumi.Input[str] kind: DaemonSet or Deployment.
    :param pulumi.Input['pulumi_kubernetes.core.v1.LifecycleArgs'] lifecycle: Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s.
    :param pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs'] liveness_probe: Liveness probe values Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
    :param pulumi.Input[str] maxmind_license_key: Maxmind license key to download GeoLite2 Databases https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases.
    :param pulumi.Input['ControllerMetricsArgs'] metrics: Controller metrics configuration — see ``ControllerMetricsArgs``.
    :param pulumi.Input[int] min_available: Minimum number of available pods — NOTE(review): presumably feeds a PodDisruptionBudget; confirm against the chart.
    :param pulumi.Input[int] min_ready_seconds: minReadySeconds to avoid killing pods before we are ready.
    :param pulumi.Input[str] name: Controller name (no description in the upstream schema).
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] node_selector: Node labels for controller pod assignment Ref: https://kubernetes.io/docs/user-guide/node-selection/.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pod_annotations: Annotations to be added to controller pods.
    :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] pod_labels: labels to add to the pod container metadata.
    :param pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs'] pod_security_context: Security Context policies for controller pods.
    :param pulumi.Input[str] priority_class_name: The priorityClassName setting (no description in the upstream schema).
    :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] proxy_set_headers: Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers.
    :param pulumi.Input['ControllerPublishServiceArgs'] publish_service: Allows customization of the source of the IP address or FQDN to report in the ingress status field. By default, it reads the information provided by the service. If disable, the status field reports the IP address of the node or nodes where an ingress controller pod is running.
    :param pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs'] readiness_probe: Readiness probe values Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
    :param pulumi.Input[int] replica_count: Number of controller replicas (no description in the upstream schema).
    :param pulumi.Input[bool] report_node_internal_ip: Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply.
    :param pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs'] resources: Define requests resources to avoid probe issues due to CPU utilization in busy nodes ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 Ideally, there should be no limits. https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
    :param pulumi.Input['ControllerScopeArgs'] scope: Limit the scope of the controller.
    :param pulumi.Input['ControllerServiceArgs'] service: Controller service configuration — see ``ControllerServiceArgs``.
    :param pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs'] startup_probe: Startup probe values Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
    :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] sysctls: See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls.
    :param pulumi.Input['ControllerTcpArgs'] tcp: Allows customization of the tcp-services-configmap.
    :param pulumi.Input[int] terminate_grace_period_seconds: How long to wait for the drain of connections.
    :param pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs'] tolerations: Node tolerations for server scheduling to nodes with taints Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/.
    :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TopologySpreadConstraintArgs']]] topology_spread_constraints: Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/.
    :param pulumi.Input['ControllerUdpArgs'] udp: Allows customization of the udp config configmap.
    :param pulumi.Input['ControllerUpdateStrategyArgs'] update_strategy: The update strategy to apply to the Deployment or DaemonSet.
    :param pulumi.Input[bool] watch_ingress_without_class: Process Ingress objects without ingressClass annotation/ingressClassName field. Overrides value for --watch-ingress-without-class flag of the controller binary. Defaults to false.
    """
    if add_headers is not None:
        pulumi.set(__self__, "add_headers", add_headers)
    if admission_webhooks is not None:
        pulumi.set(__self__, "admission_webhooks", admission_webhooks)
    if affinity is not None:
        pulumi.set(__self__, "affinity", affinity)
    if allow_snippet_annotations is not None:
        pulumi.set(__self__, "allow_snippet_annotations", allow_snippet_annotations)
    if annotations is not None:
        pulumi.set(__self__, "annotations", annotations)
    if autoscaling is not None:
        pulumi.set(__self__, "autoscaling", autoscaling)
    if autoscaling_template is not None:
        pulumi.set(__self__, "autoscaling_template", autoscaling_template)
    if config is not None:
        pulumi.set(__self__, "config", config)
    if config_annotations is not None:
        pulumi.set(__self__, "config_annotations", config_annotations)
    if config_map_namespace is not None:
        pulumi.set(__self__, "config_map_namespace", config_map_namespace)
    if container_name is not None:
        pulumi.set(__self__, "container_name", container_name)
    if container_port is not None:
        pulumi.set(__self__, "container_port", container_port)
    if custom_template is not None:
        pulumi.set(__self__, "custom_template", custom_template)
    if dns_config is not None:
        pulumi.set(__self__, "dns_config", dns_config)
    if dns_policy is not None:
        pulumi.set(__self__, "dns_policy", dns_policy)
    if election_id is not None:
        pulumi.set(__self__, "election_id", election_id)
    if enable_mimalloc is not None:
        pulumi.set(__self__, "enable_mimalloc", enable_mimalloc)
    if existing_psp is not None:
        pulumi.set(__self__, "existing_psp", existing_psp)
    if extra_args is not None:
        pulumi.set(__self__, "extra_args", extra_args)
    if extra_containers is not None:
        pulumi.set(__self__, "extra_containers", extra_containers)
    if extra_envs is not None:
        pulumi.set(__self__, "extra_envs", extra_envs)
    if extra_init_containers is not None:
        pulumi.set(__self__, "extra_init_containers", extra_init_containers)
    if extra_volume_mounts is not None:
        pulumi.set(__self__, "extra_volume_mounts", extra_volume_mounts)
    if extra_volumes is not None:
        pulumi.set(__self__, "extra_volumes", extra_volumes)
    if health_check_path is not None:
        pulumi.set(__self__, "health_check_path", health_check_path)
    if heath_check_host is not None:
        pulumi.set(__self__, "heath_check_host", heath_check_host)
    if host_network is not None:
        pulumi.set(__self__, "host_network", host_network)
    if host_port is not None:
        pulumi.set(__self__, "host_port", host_port)
    if hostname is not None:
        pulumi.set(__self__, "hostname", hostname)
    if image is not None:
        pulumi.set(__self__, "image", image)
    if ingress_class_by_name is not None:
        pulumi.set(__self__, "ingress_class_by_name", ingress_class_by_name)
    if ingress_class_resource is not None:
        pulumi.set(__self__, "ingress_class_resource", ingress_class_resource)
    if keda is not None:
        pulumi.set(__self__, "keda", keda)
    if kind is not None:
        pulumi.set(__self__, "kind", kind)
    if lifecycle is not None:
        pulumi.set(__self__, "lifecycle", lifecycle)
    if liveness_probe is not None:
        pulumi.set(__self__, "liveness_probe", liveness_probe)
    if maxmind_license_key is not None:
        pulumi.set(__self__, "maxmind_license_key", maxmind_license_key)
    if metrics is not None:
        pulumi.set(__self__, "metrics", metrics)
    if min_available is not None:
        pulumi.set(__self__, "min_available", min_available)
    if min_ready_seconds is not None:
        pulumi.set(__self__, "min_ready_seconds", min_ready_seconds)
    if name is not None:
        pulumi.set(__self__, "name", name)
    if node_selector is not None:
        pulumi.set(__self__, "node_selector", node_selector)
    if pod_annotations is not None:
        pulumi.set(__self__, "pod_annotations", pod_annotations)
    if pod_labels is not None:
        pulumi.set(__self__, "pod_labels", pod_labels)
    if pod_security_context is not None:
        pulumi.set(__self__, "pod_security_context", pod_security_context)
    if priority_class_name is not None:
        pulumi.set(__self__, "priority_class_name", priority_class_name)
    if proxy_set_headers is not None:
        pulumi.set(__self__, "proxy_set_headers", proxy_set_headers)
    if publish_service is not None:
        pulumi.set(__self__, "publish_service", publish_service)
    if readiness_probe is not None:
        pulumi.set(__self__, "readiness_probe", readiness_probe)
    if replica_count is not None:
        pulumi.set(__self__, "replica_count", replica_count)
    if report_node_internal_ip is not None:
        pulumi.set(__self__, "report_node_internal_ip", report_node_internal_ip)
    if resources is not None:
        pulumi.set(__self__, "resources", resources)
    if scope is not None:
        pulumi.set(__self__, "scope", scope)
    if service is not None:
        pulumi.set(__self__, "service", service)
    if startup_probe is not None:
        pulumi.set(__self__, "startup_probe", startup_probe)
    if sysctls is not None:
        pulumi.set(__self__, "sysctls", sysctls)
    if tcp is not None:
        pulumi.set(__self__, "tcp", tcp)
    if terminate_grace_period_seconds is not None:
        pulumi.set(__self__, "terminate_grace_period_seconds", terminate_grace_period_seconds)
    if tolerations is not None:
        pulumi.set(__self__, "tolerations", tolerations)
    if topology_spread_constraints is not None:
        pulumi.set(__self__, "topology_spread_constraints", topology_spread_constraints)
    if udp is not None:
        pulumi.set(__self__, "udp", udp)
    if update_strategy is not None:
        pulumi.set(__self__, "update_strategy", update_strategy)
    if watch_ingress_without_class is not None:
        pulumi.set(__self__, "watch_ingress_without_class", watch_ingress_without_class)
@property
@pulumi.getter(name="addHeaders")
def add_headers(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
    """
    Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers.
    """
    return pulumi.get(self, "add_headers")

@add_headers.setter
def add_headers(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
    """Set ``add_headers``."""
    pulumi.set(self, "add_headers", value)
@property
@pulumi.getter(name="admissionWebhooks")
def admission_webhooks(self) -> Optional[pulumi.Input['ContollerAdmissionWebhooksArgs']]:
    """Admission webhooks configuration — see ``ContollerAdmissionWebhooksArgs`` (the 'Contoller' spelling comes from the upstream type name)."""
    return pulumi.get(self, "admission_webhooks")

@admission_webhooks.setter
def admission_webhooks(self, value: Optional[pulumi.Input['ContollerAdmissionWebhooksArgs']]):
    """Set ``admission_webhooks``."""
    pulumi.set(self, "admission_webhooks", value)
@property
@pulumi.getter
def affinity(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']]:
    """
    Affinity and anti-affinity Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity.
    """
    return pulumi.get(self, "affinity")

@affinity.setter
def affinity(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']]):
    """Set ``affinity``."""
    pulumi.set(self, "affinity", value)
@property
@pulumi.getter(name="allowSnippetAnnotations")
def allow_snippet_annotations(self) -> Optional[pulumi.Input[bool]]:
    """
    This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected.
    """
    return pulumi.get(self, "allow_snippet_annotations")

@allow_snippet_annotations.setter
def allow_snippet_annotations(self, value: Optional[pulumi.Input[bool]]):
    """Set ``allow_snippet_annotations``."""
    pulumi.set(self, "allow_snippet_annotations", value)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
    """
    Annotations to be added to the controller Deployment or DaemonSet.
    """
    return pulumi.get(self, "annotations")

@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
    """Set ``annotations``."""
    pulumi.set(self, "annotations", value)
@property
@pulumi.getter
def autoscaling(self) -> Optional[pulumi.Input['AutoscalingArgs']]:
    """
    Mutually exclusive with keda autoscaling.
    """
    return pulumi.get(self, "autoscaling")

@autoscaling.setter
def autoscaling(self, value: Optional[pulumi.Input['AutoscalingArgs']]):
    """Set ``autoscaling``."""
    pulumi.set(self, "autoscaling", value)
@property
@pulumi.getter(name="autoscalingTemplate")
def autoscaling_template(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingTemplateArgs']]]]:
    """
    Custom or additional autoscaling metrics ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
    """
    return pulumi.get(self, "autoscaling_template")

@autoscaling_template.setter
def autoscaling_template(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingTemplateArgs']]]]):
    """Set ``autoscaling_template``."""
    pulumi.set(self, "autoscaling_template", value)
@property
@pulumi.getter
def config(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
    """
    Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/.
    """
    return pulumi.get(self, "config")

@config.setter
def config(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
    """Set ``config``."""
    pulumi.set(self, "config", value)
@property
@pulumi.getter(name="configAnnotations")
def config_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
    """
    Annotations to be added to the controller config configuration configmap.
    """
    return pulumi.get(self, "config_annotations")

@config_annotations.setter
def config_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
    """Set ``config_annotations``."""
    pulumi.set(self, "config_annotations", value)
@property
@pulumi.getter(name="configMapNamespace")
def config_map_namespace(self) -> Optional[pulumi.Input[str]]:
    """
    Allows customization of the configmap / nginx-configmap namespace.
    """
    return pulumi.get(self, "config_map_namespace")

@config_map_namespace.setter
def config_map_namespace(self, value: Optional[pulumi.Input[str]]):
    """Set ``config_map_namespace``."""
    pulumi.set(self, "config_map_namespace", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
    """
    Configures the controller container name.
    """
    return pulumi.get(self, "container_name")

@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
    """Set ``container_name``."""
    pulumi.set(self, "container_name", value)
@property
@pulumi.getter(name="containerPort")
def container_port(self) -> Optional[pulumi.Input['ControllerPortArgs']]:
    """
    Configures the ports the nginx-controller listens on.
    """
    return pulumi.get(self, "container_port")

@container_port.setter
def container_port(self, value: Optional[pulumi.Input['ControllerPortArgs']]):
    """Set ``container_port``."""
    pulumi.set(self, "container_port", value)
@property
@pulumi.getter(name="customTemplate")
def custom_template(self) -> Optional[pulumi.Input['ControllerCustomTemplateArgs']]:
    """
    Override NGINX template.
    """
    return pulumi.get(self, "custom_template")

@custom_template.setter
def custom_template(self, value: Optional[pulumi.Input['ControllerCustomTemplateArgs']]):
    """Set ``custom_template``."""
    pulumi.set(self, "custom_template", value)
@property
@pulumi.getter(name="dnsConfig")
def dns_config(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
    """
    Optionally customize the pod dnsConfig.
    """
    return pulumi.get(self, "dns_config")

@dns_config.setter
def dns_config(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
    """Set ``dns_config``."""
    pulumi.set(self, "dns_config", value)
@property
@pulumi.getter(name="dnsPolicy")
def dns_policy(self) -> Optional[pulumi.Input[str]]:
    """
    Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
    """
    return pulumi.get(self, "dns_policy")

@dns_policy.setter
def dns_policy(self, value: Optional[pulumi.Input[str]]):
    """Set ``dns_policy``."""
    pulumi.set(self, "dns_policy", value)
@property
@pulumi.getter(name="electionID")
def election_id(self) -> Optional[pulumi.Input[str]]:
    """
    Election ID to use for status update.
    """
    return pulumi.get(self, "election_id")

@election_id.setter
def election_id(self, value: Optional[pulumi.Input[str]]):
    """Set ``election_id``."""
    pulumi.set(self, "election_id", value)
@property
@pulumi.getter(name="enableMimalloc")
def enable_mimalloc(self) -> Optional[pulumi.Input[bool]]:
    """
    Enable mimalloc as a drop-in replacement for malloc. ref: https://github.com/microsoft/mimalloc.
    """
    return pulumi.get(self, "enable_mimalloc")

@enable_mimalloc.setter
def enable_mimalloc(self, value: Optional[pulumi.Input[bool]]):
    """Set ``enable_mimalloc``."""
    pulumi.set(self, "enable_mimalloc", value)
@property
@pulumi.getter(name="existingPsp")
def existing_psp(self) -> Optional[pulumi.Input[str]]:
    """
    Use an existing PSP instead of creating one.
    """
    return pulumi.get(self, "existing_psp")

@existing_psp.setter
def existing_psp(self, value: Optional[pulumi.Input[str]]):
    """Set ``existing_psp``."""
    pulumi.set(self, "existing_psp", value)
@property
@pulumi.getter(name="extraArgs")
def extra_args(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
    """
    Additional command line arguments to pass to nginx-ingress-controller E.g. to specify the default SSL certificate you can use `default-ssl-certificate: "<namespace>/<secret_name>"`.
    """
    return pulumi.get(self, "extra_args")

@extra_args.setter
def extra_args(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
    """Set ``extra_args``."""
    pulumi.set(self, "extra_args", value)
@property
@pulumi.getter(name="extraContainers")
def extra_containers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]]]:
    """
    Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
    """
    return pulumi.get(self, "extra_containers")

@extra_containers.setter
def extra_containers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]]]):
    """Set ``extra_containers``."""
    pulumi.set(self, "extra_containers", value)
@property
@pulumi.getter(name="extraEnvs")
def extra_envs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]]:
    """
    Additional environment variables to set.
    """
    return pulumi.get(self, "extra_envs")

@extra_envs.setter
def extra_envs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]]):
    """Set ``extra_envs``."""
    pulumi.set(self, "extra_envs", value)
@property
@pulumi.getter(name="extraInitContainers")
def extra_init_containers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]]]:
"""
Containers, which are run before the app containers are started. - name: init-myservice image: busybox command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
"""
return pulumi.get(self, "extra_init_containers")
@extra_init_containers.setter
def extra_init_containers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]]]):
pulumi.set(self, "extra_init_containers", value)
@property
@pulumi.getter(name="extraVolumeMounts")
def extra_volume_mounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]:
"""
Additional volumeMounts to the controller main container. - name: copy-portal-skins mountPath: /var/lib/lemonldap-ng/portal/skins
"""
return pulumi.get(self, "extra_volume_mounts")
@extra_volume_mounts.setter
def extra_volume_mounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]):
pulumi.set(self, "extra_volume_mounts", value)
@property
@pulumi.getter(name="extraVolumes")
def extra_volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]]:
"""
Additional volumes to the controller pod. - name: copy-portal-skins emptyDir: {}
"""
return pulumi.get(self, "extra_volumes")
@extra_volumes.setter
def extra_volumes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]]):
pulumi.set(self, "extra_volumes", value)
    @property
    @pulumi.getter(name="healthCheckPath")
    def health_check_path(self) -> Optional[pulumi.Input[str]]:
        """
        Path of the health check endpoint. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path.
        """
        return pulumi.get(self, "health_check_path")

    @health_check_path.setter
    def health_check_path(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "health_check_path", value)

    # NOTE: "heath" (sic) — the misspelled "heathCheckHost" key mirrors the
    # generated schema and must be kept for wire compatibility.
    @property
    @pulumi.getter(name="heathCheckHost")
    def heath_check_host(self) -> Optional[pulumi.Input[str]]:
        """
        Address to bind the health check endpoint. It is better to set this option to the internal node address if the ingress nginx controller is running in the hostNetwork: true mode.
        """
        return pulumi.get(self, "heath_check_host")

    @heath_check_host.setter
    def heath_check_host(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "heath_check_host", value)

    @property
    @pulumi.getter(name="hostNetwork")
    def host_network(self) -> Optional[pulumi.Input[bool]]:
        """
        Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged.
        """
        return pulumi.get(self, "host_network")

    @host_network.setter
    def host_network(self, value: Optional[pulumi.Input[bool]]) -> None:
        pulumi.set(self, "host_network", value)

    @property
    @pulumi.getter(name="hostPort")
    def host_port(self) -> Optional[pulumi.Input['ControllerHostPortArgs']]:
        """
        Use host ports 80 and 443. Disabled by default.
        """
        return pulumi.get(self, "host_port")

    @host_port.setter
    def host_port(self, value: Optional[pulumi.Input['ControllerHostPortArgs']]) -> None:
        pulumi.set(self, "host_port", value)

    @property
    @pulumi.getter
    def hostname(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        Optionally customize the pod hostname.
        """
        return pulumi.get(self, "hostname")

    @hostname.setter
    def hostname(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]) -> None:
        pulumi.set(self, "hostname", value)

    @property
    @pulumi.getter
    def image(self) -> Optional[pulumi.Input['ControllerImageArgs']]:
        """
        Controller container image settings; see `ControllerImageArgs`.
        """
        return pulumi.get(self, "image")

    @image.setter
    def image(self, value: Optional[pulumi.Input['ControllerImageArgs']]) -> None:
        pulumi.set(self, "image", value)

    @property
    @pulumi.getter(name="ingressClassByName")
    def ingress_class_by_name(self) -> Optional[pulumi.Input[bool]]:
        """
        Process IngressClass per name (additionally as per spec.controller).
        """
        return pulumi.get(self, "ingress_class_by_name")

    @ingress_class_by_name.setter
    def ingress_class_by_name(self, value: Optional[pulumi.Input[bool]]) -> None:
        pulumi.set(self, "ingress_class_by_name", value)

    @property
    @pulumi.getter(name="ingressClassResource")
    def ingress_class_resource(self) -> Optional[pulumi.Input['ControllerIngressClassResourceArgs']]:
        """
        This section refers to the creation of the IngressClass resource. IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19
        """
        return pulumi.get(self, "ingress_class_resource")

    @ingress_class_resource.setter
    def ingress_class_resource(self, value: Optional[pulumi.Input['ControllerIngressClassResourceArgs']]) -> None:
        pulumi.set(self, "ingress_class_resource", value)
    @property
    @pulumi.getter
    def keda(self) -> Optional[pulumi.Input['KedaArgs']]:
        """
        Mutually exclusive with hpa autoscaling.
        """
        return pulumi.get(self, "keda")

    @keda.setter
    def keda(self, value: Optional[pulumi.Input['KedaArgs']]) -> None:
        pulumi.set(self, "keda", value)

    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[str]]:
        """
        DaemonSet or Deployment.
        """
        return pulumi.get(self, "kind")

    @kind.setter
    def kind(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "kind", value)

    @property
    @pulumi.getter
    def lifecycle(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.LifecycleArgs']]:
        """
        Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s.
        """
        return pulumi.get(self, "lifecycle")

    @lifecycle.setter
    def lifecycle(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.LifecycleArgs']]) -> None:
        pulumi.set(self, "lifecycle", value)

    @property
    @pulumi.getter(name="livenessProbe")
    def liveness_probe(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]:
        """
        Liveness probe values Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        """
        return pulumi.get(self, "liveness_probe")

    @liveness_probe.setter
    def liveness_probe(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]) -> None:
        pulumi.set(self, "liveness_probe", value)

    @property
    @pulumi.getter(name="maxmindLicenseKey")
    def maxmind_license_key(self) -> Optional[pulumi.Input[str]]:
        """
        Maxmind license key to download GeoLite2 Databases https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases.
        """
        return pulumi.get(self, "maxmind_license_key")

    @maxmind_license_key.setter
    def maxmind_license_key(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "maxmind_license_key", value)

    @property
    @pulumi.getter
    def metrics(self) -> Optional[pulumi.Input['ControllerMetricsArgs']]:
        """
        Controller metrics settings; see `ControllerMetricsArgs`.
        """
        return pulumi.get(self, "metrics")

    @metrics.setter
    def metrics(self, value: Optional[pulumi.Input['ControllerMetricsArgs']]) -> None:
        pulumi.set(self, "metrics", value)

    @property
    @pulumi.getter(name="minAvailable")
    def min_available(self) -> Optional[pulumi.Input[int]]:
        """
        `minAvailable` value — presumably feeds the controller PodDisruptionBudget; confirm against the chart.
        """
        return pulumi.get(self, "min_available")

    @min_available.setter
    def min_available(self, value: Optional[pulumi.Input[int]]) -> None:
        pulumi.set(self, "min_available", value)

    @property
    @pulumi.getter(name="minReadySeconds")
    def min_ready_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        minReadySeconds to avoid killing pods before we are ready.
        """
        return pulumi.get(self, "min_ready_seconds")

    @min_ready_seconds.setter
    def min_ready_seconds(self, value: Optional[pulumi.Input[int]]) -> None:
        pulumi.set(self, "min_ready_seconds", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Controller `name` chart value — presumably used to name created resources; confirm against the chart.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="nodeSelector")
    def node_selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Node labels for controller pod assignment Ref: https://kubernetes.io/docs/user-guide/node-selection/.
        """
        return pulumi.get(self, "node_selector")

    @node_selector.setter
    def node_selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]) -> None:
        pulumi.set(self, "node_selector", value)

    @property
    @pulumi.getter(name="podAnnotations")
    def pod_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Annotations to be added to controller pods.
        """
        return pulumi.get(self, "pod_annotations")

    @pod_annotations.setter
    def pod_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]) -> None:
        pulumi.set(self, "pod_annotations", value)

    @property
    @pulumi.getter(name="podLabels")
    def pod_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        labels to add to the pod container metadata.
        """
        return pulumi.get(self, "pod_labels")

    @pod_labels.setter
    def pod_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]) -> None:
        pulumi.set(self, "pod_labels", value)

    @property
    @pulumi.getter(name="podSecurityContext")
    def pod_security_context(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]:
        """
        Security Context policies for controller pods.
        """
        return pulumi.get(self, "pod_security_context")

    @pod_security_context.setter
    def pod_security_context(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]) -> None:
        pulumi.set(self, "pod_security_context", value)

    @property
    @pulumi.getter(name="priorityClassName")
    def priority_class_name(self) -> Optional[pulumi.Input[str]]:
        """
        `priorityClassName` — presumably applied to controller pods; confirm against the chart.
        """
        return pulumi.get(self, "priority_class_name")

    @priority_class_name.setter
    def priority_class_name(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "priority_class_name", value)

    @property
    @pulumi.getter(name="proxySetHeaders")
    def proxy_set_headers(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers.
        """
        return pulumi.get(self, "proxy_set_headers")

    @proxy_set_headers.setter
    def proxy_set_headers(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]) -> None:
        pulumi.set(self, "proxy_set_headers", value)

    @property
    @pulumi.getter(name="publishService")
    def publish_service(self) -> Optional[pulumi.Input['ControllerPublishServiceArgs']]:
        """
        Allows customization of the source of the IP address or FQDN to report in the ingress status field. By default, it reads the information provided by the service. If disable, the status field reports the IP address of the node or nodes where an ingress controller pod is running.
        """
        return pulumi.get(self, "publish_service")

    @publish_service.setter
    def publish_service(self, value: Optional[pulumi.Input['ControllerPublishServiceArgs']]) -> None:
        pulumi.set(self, "publish_service", value)

    @property
    @pulumi.getter(name="readinessProbe")
    def readiness_probe(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]:
        """
        Readiness probe values Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        """
        return pulumi.get(self, "readiness_probe")

    @readiness_probe.setter
    def readiness_probe(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]) -> None:
        pulumi.set(self, "readiness_probe", value)

    @property
    @pulumi.getter(name="replicaCount")
    def replica_count(self) -> Optional[pulumi.Input[int]]:
        """
        Desired number of controller replicas (`replicaCount`).
        """
        return pulumi.get(self, "replica_count")

    @replica_count.setter
    def replica_count(self, value: Optional[pulumi.Input[int]]) -> None:
        pulumi.set(self, "replica_count", value)

    @property
    @pulumi.getter(name="reportNodeInternalIp")
    def report_node_internal_ip(self) -> Optional[pulumi.Input[bool]]:
        """
        Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply.
        """
        return pulumi.get(self, "report_node_internal_ip")

    @report_node_internal_ip.setter
    def report_node_internal_ip(self, value: Optional[pulumi.Input[bool]]) -> None:
        pulumi.set(self, "report_node_internal_ip", value)
    @property
    @pulumi.getter
    def resources(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]:
        """
        Define requests resources to avoid probe issues due to CPU utilization in busy nodes ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 Ideally, there should be no limits. https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
        """
        return pulumi.get(self, "resources")

    @resources.setter
    def resources(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]) -> None:
        pulumi.set(self, "resources", value)

    @property
    @pulumi.getter
    def scope(self) -> Optional[pulumi.Input['ControllerScopeArgs']]:
        """
        Limit the scope of the controller.
        """
        return pulumi.get(self, "scope")

    @scope.setter
    def scope(self, value: Optional[pulumi.Input['ControllerScopeArgs']]) -> None:
        pulumi.set(self, "scope", value)

    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input['ControllerServiceArgs']]:
        """
        Controller Service settings; see `ControllerServiceArgs`.
        """
        return pulumi.get(self, "service")

    @service.setter
    def service(self, value: Optional[pulumi.Input['ControllerServiceArgs']]) -> None:
        pulumi.set(self, "service", value)

    @property
    @pulumi.getter(name="startupProbe")
    def startup_probe(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]:
        """
        Startup probe values Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        """
        return pulumi.get(self, "startup_probe")

    @startup_probe.setter
    def startup_probe(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]) -> None:
        pulumi.set(self, "startup_probe", value)

    @property
    @pulumi.getter
    def sysctls(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls.
        """
        return pulumi.get(self, "sysctls")

    @sysctls.setter
    def sysctls(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]) -> None:
        pulumi.set(self, "sysctls", value)

    @property
    @pulumi.getter
    def tcp(self) -> Optional[pulumi.Input['ControllerTcpArgs']]:
        """
        Allows customization of the tcp-services-configmap.
        """
        return pulumi.get(self, "tcp")

    @tcp.setter
    def tcp(self, value: Optional[pulumi.Input['ControllerTcpArgs']]) -> None:
        pulumi.set(self, "tcp", value)

    @property
    @pulumi.getter(name="terminateGracePeriodSeconds")
    def terminate_grace_period_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        How long to wait for the drain of connections.
        """
        return pulumi.get(self, "terminate_grace_period_seconds")

    @terminate_grace_period_seconds.setter
    def terminate_grace_period_seconds(self, value: Optional[pulumi.Input[int]]) -> None:
        pulumi.set(self, "terminate_grace_period_seconds", value)

    # NOTE(review): the declared type is a single TolerationArgs rather than a
    # Sequence, which looks odd for Kubernetes tolerations — it matches the
    # generated schema as-is; confirm upstream before relying on it.
    @property
    @pulumi.getter
    def tolerations(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]:
        """
        Node tolerations for server scheduling to nodes with taints Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/.
        """
        return pulumi.get(self, "tolerations")

    @tolerations.setter
    def tolerations(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]) -> None:
        pulumi.set(self, "tolerations", value)

    @property
    @pulumi.getter(name="topologySpreadConstraints")
    def topology_spread_constraints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TopologySpreadConstraintArgs']]]]:
        """
        Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/.
        """
        return pulumi.get(self, "topology_spread_constraints")

    @topology_spread_constraints.setter
    def topology_spread_constraints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TopologySpreadConstraintArgs']]]]) -> None:
        pulumi.set(self, "topology_spread_constraints", value)

    @property
    @pulumi.getter
    def udp(self) -> Optional[pulumi.Input['ControllerUdpArgs']]:
        """
        Allows customization of the udp-services-configmap — presumably the UDP analogue of `tcp`; confirm against the chart.
        """
        return pulumi.get(self, "udp")

    @udp.setter
    def udp(self, value: Optional[pulumi.Input['ControllerUdpArgs']]) -> None:
        pulumi.set(self, "udp", value)

    @property
    @pulumi.getter(name="updateStrategy")
    def update_strategy(self) -> Optional[pulumi.Input['ControllerUpdateStrategyArgs']]:
        """
        The update strategy to apply to the Deployment or DaemonSet.
        """
        return pulumi.get(self, "update_strategy")

    @update_strategy.setter
    def update_strategy(self, value: Optional[pulumi.Input['ControllerUpdateStrategyArgs']]) -> None:
        pulumi.set(self, "update_strategy", value)

    @property
    @pulumi.getter(name="watchIngressWithoutClass")
    def watch_ingress_without_class(self) -> Optional[pulumi.Input[bool]]:
        """
        Process Ingress objects without ingressClass annotation/ingressClassName field. Overrides value for --watch-ingress-without-class flag of the controller binary. Defaults to false.
        """
        return pulumi.get(self, "watch_ingress_without_class")

    @watch_ingress_without_class.setter
    def watch_ingress_without_class(self, value: Optional[pulumi.Input[bool]]) -> None:
        pulumi.set(self, "watch_ingress_without_class", value)
@pulumi.input_type
class KedaScaledObjectArgs:
    """
    Customization of the KEDA ScaledObject resource (currently only its
    annotations). Field access is mediated by `pulumi.get`/`pulumi.set`;
    the `@pulumi.input_type` decorator derives the schema from these members.
    """

    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Custom annotations for ScaledObject resource.
        """
        # Only store what the caller actually supplied; absent keys stay unset.
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Custom annotations for ScaledObject resource.
        """
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]) -> None:
        pulumi.set(self, "annotations", value)
@pulumi.input_type
class KedaTriggerArgs:
    """
    A single KEDA trigger entry (a `type` plus scaler-specific `metadata`),
    as used in the `triggers` list of `KedaArgs`.
    """

    def __init__(__self__, *,
                 metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] metadata: Scaler-specific trigger settings — presumably the KEDA trigger `metadata` block; confirm against the KEDA docs.
        :param pulumi.Input[str] type: Trigger type (e.g. a KEDA scaler name such as `prometheus`) — TODO confirm accepted values.
        """
        if metadata is not None:
            pulumi.set(__self__, "metadata", metadata)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        Scaler-specific trigger settings — presumably the KEDA trigger `metadata` block; confirm against the KEDA docs.
        """
        return pulumi.get(self, "metadata")

    @metadata.setter
    def metadata(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]) -> None:
        pulumi.set(self, "metadata", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Trigger type — presumably a KEDA scaler name; confirm accepted values against the KEDA docs.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "type", value)
@pulumi.input_type
class KedaArgs:
    """
    KEDA-based autoscaling settings for the controller (mutually exclusive
    with HPA autoscaling — see the controller's `keda` property). Field names
    presumably mirror the KEDA ScaledObject spec; confirm against the KEDA docs.
    """

    def __init__(__self__, *,
                 api_version: Optional[pulumi.Input[str]] = None,
                 behavior: Optional[pulumi.Input['AutoscalingBehaviorArgs']] = None,
                 cooldown_period: Optional[pulumi.Input[int]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 max_replicas: Optional[pulumi.Input[int]] = None,
                 min_replicas: Optional[pulumi.Input[int]] = None,
                 polling_interval: Optional[pulumi.Input[int]] = None,
                 restore_to_original_replica_count: Optional[pulumi.Input[bool]] = None,
                 scaled_object: Optional[pulumi.Input['KedaScaledObjectArgs']] = None,
                 triggers: Optional[pulumi.Input[Sequence[pulumi.Input['KedaTriggerArgs']]]] = None):
        """
        :param pulumi.Input[str] api_version: apiVersion changes with keda 1.x vs 2.x: 2.x = keda.sh/v1alpha1, 1.x = keda.k8s.io/v1alpha1.
        :param pulumi.Input['AutoscalingBehaviorArgs'] behavior: Scaling behavior configuration — presumably passed through to the HPA managed by KEDA; confirm against the chart.
        :param pulumi.Input[int] cooldown_period: KEDA `cooldownPeriod` (seconds) — TODO confirm semantics against the KEDA docs.
        :param pulumi.Input[bool] enabled: Enable KEDA autoscaling.
        :param pulumi.Input[int] max_replicas: Upper bound on controller replicas.
        :param pulumi.Input[int] min_replicas: Lower bound on controller replicas.
        :param pulumi.Input[int] polling_interval: KEDA `pollingInterval` (seconds) — TODO confirm semantics against the KEDA docs.
        :param pulumi.Input[bool] restore_to_original_replica_count: KEDA `restoreToOriginalReplicaCount` — TODO confirm semantics against the KEDA docs.
        :param pulumi.Input['KedaScaledObjectArgs'] scaled_object: Customization of the generated ScaledObject resource (annotations).
        :param pulumi.Input[Sequence[pulumi.Input['KedaTriggerArgs']]] triggers: List of KEDA trigger definitions.
        """
        # Each field is stored only when explicitly provided, so the Pulumi
        # input stays sparse and chart defaults apply for omitted keys.
        if api_version is not None:
            pulumi.set(__self__, "api_version", api_version)
        if behavior is not None:
            pulumi.set(__self__, "behavior", behavior)
        if cooldown_period is not None:
            pulumi.set(__self__, "cooldown_period", cooldown_period)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if max_replicas is not None:
            pulumi.set(__self__, "max_replicas", max_replicas)
        if min_replicas is not None:
            pulumi.set(__self__, "min_replicas", min_replicas)
        if polling_interval is not None:
            pulumi.set(__self__, "polling_interval", polling_interval)
        if restore_to_original_replica_count is not None:
            pulumi.set(__self__, "restore_to_original_replica_count", restore_to_original_replica_count)
        if scaled_object is not None:
            pulumi.set(__self__, "scaled_object", scaled_object)
        if triggers is not None:
            pulumi.set(__self__, "triggers", triggers)

    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[pulumi.Input[str]]:
        """
        apiVersion changes with keda 1.x vs 2.x: 2.x = keda.sh/v1alpha1, 1.x = keda.k8s.io/v1alpha1.
        """
        return pulumi.get(self, "api_version")

    @api_version.setter
    def api_version(self, value: Optional[pulumi.Input[str]]) -> None:
        pulumi.set(self, "api_version", value)

    @property
    @pulumi.getter
    def behavior(self) -> Optional[pulumi.Input['AutoscalingBehaviorArgs']]:
        """
        Scaling behavior configuration — presumably passed through to the HPA managed by KEDA; confirm against the chart.
        """
        return pulumi.get(self, "behavior")

    @behavior.setter
    def behavior(self, value: Optional[pulumi.Input['AutoscalingBehaviorArgs']]) -> None:
        pulumi.set(self, "behavior", value)

    @property
    @pulumi.getter(name="cooldownPeriod")
    def cooldown_period(self) -> Optional[pulumi.Input[int]]:
        """
        KEDA `cooldownPeriod` (seconds) — TODO confirm semantics against the KEDA docs.
        """
        return pulumi.get(self, "cooldown_period")

    @cooldown_period.setter
    def cooldown_period(self, value: Optional[pulumi.Input[int]]) -> None:
        pulumi.set(self, "cooldown_period", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable KEDA autoscaling.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]) -> None:
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="maxReplicas")
    def max_replicas(self) -> Optional[pulumi.Input[int]]:
        """
        Upper bound on controller replicas.
        """
        return pulumi.get(self, "max_replicas")

    @max_replicas.setter
    def max_replicas(self, value: Optional[pulumi.Input[int]]) -> None:
        pulumi.set(self, "max_replicas", value)

    @property
    @pulumi.getter(name="minReplicas")
    def min_replicas(self) -> Optional[pulumi.Input[int]]:
        """
        Lower bound on controller replicas.
        """
        return pulumi.get(self, "min_replicas")

    @min_replicas.setter
    def min_replicas(self, value: Optional[pulumi.Input[int]]) -> None:
        pulumi.set(self, "min_replicas", value)

    @property
    @pulumi.getter(name="pollingInterval")
    def polling_interval(self) -> Optional[pulumi.Input[int]]:
        """
        KEDA `pollingInterval` (seconds) — TODO confirm semantics against the KEDA docs.
        """
        return pulumi.get(self, "polling_interval")

    @polling_interval.setter
    def polling_interval(self, value: Optional[pulumi.Input[int]]) -> None:
        pulumi.set(self, "polling_interval", value)

    @property
    @pulumi.getter(name="restoreToOriginalReplicaCount")
    def restore_to_original_replica_count(self) -> Optional[pulumi.Input[bool]]:
        """
        KEDA `restoreToOriginalReplicaCount` — TODO confirm semantics against the KEDA docs.
        """
        return pulumi.get(self, "restore_to_original_replica_count")

    @restore_to_original_replica_count.setter
    def restore_to_original_replica_count(self, value: Optional[pulumi.Input[bool]]) -> None:
        pulumi.set(self, "restore_to_original_replica_count", value)

    @property
    @pulumi.getter(name="scaledObject")
    def scaled_object(self) -> Optional[pulumi.Input['KedaScaledObjectArgs']]:
        """
        Customization of the generated ScaledObject resource (annotations).
        """
        return pulumi.get(self, "scaled_object")

    @scaled_object.setter
    def scaled_object(self, value: Optional[pulumi.Input['KedaScaledObjectArgs']]) -> None:
        pulumi.set(self, "scaled_object", value)

    @property
    @pulumi.getter
    def triggers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KedaTriggerArgs']]]]:
        """
        List of KEDA trigger definitions.
        """
        return pulumi.get(self, "triggers")

    @triggers.setter
    def triggers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['KedaTriggerArgs']]]]) -> None:
        pulumi.set(self, "triggers", value)
@pulumi.input_type
class ReleaseArgs:
def __init__(__self__, *,
atomic: Optional[pulumi.Input[bool]] = None,
chart: Optional[pulumi.Input[str]] = None,
cleanup_on_fail: Optional[pulumi.Input[bool]] = None,
create_namespace: Optional[pulumi.Input[bool]] = None,
dependency_update: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
devel: Optional[pulumi.Input[bool]] = None,
disable_crd_hooks: Optional[pulumi.Input[bool]] = None,
disable_openapi_validation: Optional[pulumi.Input[bool]] = None,
disable_webhooks: Optional[pulumi.Input[bool]] = None,
force_update: Optional[pulumi.Input[bool]] = None,
keyring: Optional[pulumi.Input[str]] = None,
lint: Optional[pulumi.Input[bool]] = None,
manifest: Optional[pulumi.Input[Mapping[str, Any]]] = None,
max_history: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
postrender: Optional[pulumi.Input[str]] = None,
recreate_pods: Optional[pulumi.Input[bool]] = None,
render_subchart_notes: Optional[pulumi.Input[bool]] = None,
replace: Optional[pulumi.Input[bool]] = None,
repository_opts: Optional[pulumi.Input['RepositoryOptsArgs']] = None,
reset_values: Optional[pulumi.Input[bool]] = None,
resource_names: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input[str]]]]]] = None,
reuse_values: Optional[pulumi.Input[bool]] = None,
skip_await: Optional[pulumi.Input[bool]] = None,
skip_crds: Optional[pulumi.Input[bool]] = None,
timeout: Optional[pulumi.Input[int]] = None,
value_yaml_files: Optional[pulumi.Input[Sequence[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]]] = None,
values: Optional[pulumi.Input[Mapping[str, Any]]] = None,
verify: Optional[pulumi.Input[bool]] = None,
version: Optional[pulumi.Input[str]] = None,
wait_for_jobs: Optional[pulumi.Input[bool]] = None):
"""
A Release is an instance of a chart running in a Kubernetes cluster.
A Chart is a Helm package. It contains all of the resource definitions necessary to run an application, tool, or service inside of a Kubernetes cluster.
Note - Helm Release is currently in BETA and may change. Use in production environment is discouraged.
:param pulumi.Input[bool] atomic: If set, installation process purges chart on fail. `skipAwait` will be disabled automatically if atomic is used.
:param pulumi.Input[str] chart: Chart name to be installed. A path may be used.
:param pulumi.Input[bool] cleanup_on_fail: Allow deletion of new resources created in this upgrade when upgrade fails.
:param pulumi.Input[bool] create_namespace: Create the namespace if it does not exist.
:param pulumi.Input[bool] dependency_update: Run helm dependency update before installing the chart.
:param pulumi.Input[str] description: Add a custom description
:param pulumi.Input[bool] devel: Use chart development versions, too. Equivalent to version '>0.0.0-0'. If `version` is set, this is ignored.
:param pulumi.Input[bool] disable_crd_hooks: Prevent CRD hooks from, running, but run other hooks. See helm install --no-crd-hook
:param pulumi.Input[bool] disable_openapi_validation: If set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema
:param pulumi.Input[bool] disable_webhooks: Prevent hooks from running.
:param pulumi.Input[bool] force_update: Force resource update through delete/recreate if needed.
:param pulumi.Input[str] keyring: Location of public keys used for verification. Used only if `verify` is true
:param pulumi.Input[bool] lint: Run helm lint when planning.
:param pulumi.Input[Mapping[str, Any]] manifest: The rendered manifests as JSON. Not yet supported.
:param pulumi.Input[int] max_history: Limit the maximum number of revisions saved per release. Use 0 for no limit.
:param pulumi.Input[str] name: Release name.
:param pulumi.Input[str] namespace: Namespace to install the release into.
:param pulumi.Input[str] postrender: Postrender command to run.
:param pulumi.Input[bool] recreate_pods: Perform pods restart during upgrade/rollback.
:param pulumi.Input[bool] render_subchart_notes: If set, render subchart notes along with the parent.
:param pulumi.Input[bool] replace: Re-use the given name, even if that name is already used. This is unsafe in production
:param pulumi.Input['RepositoryOptsArgs'] repository_opts: Specification defining the Helm chart repository to use.
:param pulumi.Input[bool] reset_values: When upgrading, reset the values to the ones built into the chart.
:param pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input[str]]]]] resource_names: Names of resources created by the release grouped by "kind/version".
:param pulumi.Input[bool] reuse_values: When upgrading, reuse the last release's values and merge in any overrides. If 'resetValues' is specified, this is ignored
:param pulumi.Input[bool] skip_await: By default, the provider waits until all resources are in a ready state before marking the release as successful. Setting this to true will skip such await logic.
:param pulumi.Input[bool] skip_crds: If set, no CRDs will be installed. By default, CRDs are installed if not already present.
:param pulumi.Input[int] timeout: Time in seconds to wait for any individual kubernetes operation.
:param pulumi.Input[Sequence[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]] value_yaml_files: List of assets (raw yaml files). Content is read and merged with values. Not yet supported.
:param pulumi.Input[Mapping[str, Any]] values: Custom values set for the release.
:param pulumi.Input[bool] verify: Verify the package before installing it.
:param pulumi.Input[str] version: Specify the exact chart version to install. If this is not specified, the latest version is installed.
:param pulumi.Input[bool] wait_for_jobs: Will wait until all Jobs have been completed before marking the release as successful. This is ignored if `skipAwait` is enabled.
"""
if atomic is not None:
pulumi.set(__self__, "atomic", atomic)
if chart is not None:
pulumi.set(__self__, "chart", chart)
if cleanup_on_fail is not None:
pulumi.set(__self__, "cleanup_on_fail", cleanup_on_fail)
if create_namespace is not None:
pulumi.set(__self__, "create_namespace", create_namespace)
if dependency_update is not None:
pulumi.set(__self__, "dependency_update", dependency_update)
if description is not None:
pulumi.set(__self__, "description", description)
if devel is not None:
pulumi.set(__self__, "devel", devel)
if disable_crd_hooks is not None:
pulumi.set(__self__, "disable_crd_hooks", disable_crd_hooks)
if disable_openapi_validation is not None:
pulumi.set(__self__, "disable_openapi_validation", disable_openapi_validation)
if disable_webhooks is not None:
pulumi.set(__self__, "disable_webhooks", disable_webhooks)
if force_update is not None:
pulumi.set(__self__, "force_update", force_update)
if keyring is not None:
pulumi.set(__self__, "keyring", keyring)
if lint is not None:
pulumi.set(__self__, "lint", lint)
if manifest is not None:
pulumi.set(__self__, "manifest", manifest)
if max_history is not None:
pulumi.set(__self__, "max_history", max_history)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if postrender is not None:
pulumi.set(__self__, "postrender", postrender)
if recreate_pods is not None:
pulumi.set(__self__, "recreate_pods", recreate_pods)
if render_subchart_notes is not None:
pulumi.set(__self__, "render_subchart_notes", render_subchart_notes)
if replace is not None:
pulumi.set(__self__, "replace", replace)
if repository_opts is not None:
pulumi.set(__self__, "repository_opts", repository_opts)
if reset_values is not None:
pulumi.set(__self__, "reset_values", reset_values)
if resource_names is not None:
pulumi.set(__self__, "resource_names", resource_names)
if reuse_values is not None:
pulumi.set(__self__, "reuse_values", reuse_values)
if skip_await is not None:
pulumi.set(__self__, "skip_await", skip_await)
if skip_crds is not None:
pulumi.set(__self__, "skip_crds", skip_crds)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
if value_yaml_files is not None:
pulumi.set(__self__, "value_yaml_files", value_yaml_files)
if values is not None:
pulumi.set(__self__, "values", values)
if verify is not None:
pulumi.set(__self__, "verify", verify)
if version is not None:
pulumi.set(__self__, "version", version)
if wait_for_jobs is not None:
pulumi.set(__self__, "wait_for_jobs", wait_for_jobs)
    # NOTE(review): the accessors below appear to be machine-generated Pulumi
    # SDK code (part of a Helm Release args input class whose header is not
    # visible in this chunk - confirm before refactoring). Each @pulumi.getter
    # maps the snake_case Python property to its camelCase wire name, and the
    # enclosing @pulumi.input_type decorator introspects these definitions, so
    # their exact shape must be preserved; prefer regenerating over hand-editing.
    @property
    @pulumi.getter
    def atomic(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, installation process purges chart on fail. `skipAwait` will be disabled automatically if atomic is used.
        """
        return pulumi.get(self, "atomic")
    @atomic.setter
    def atomic(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "atomic", value)
    @property
    @pulumi.getter
    def chart(self) -> Optional[pulumi.Input[str]]:
        """
        Chart name to be installed. A path may be used.
        """
        return pulumi.get(self, "chart")
    @chart.setter
    def chart(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "chart", value)
    @property
    @pulumi.getter(name="cleanupOnFail")
    def cleanup_on_fail(self) -> Optional[pulumi.Input[bool]]:
        """
        Allow deletion of new resources created in this upgrade when upgrade fails.
        """
        return pulumi.get(self, "cleanup_on_fail")
    @cleanup_on_fail.setter
    def cleanup_on_fail(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "cleanup_on_fail", value)
    @property
    @pulumi.getter(name="createNamespace")
    def create_namespace(self) -> Optional[pulumi.Input[bool]]:
        """
        Create the namespace if it does not exist.
        """
        return pulumi.get(self, "create_namespace")
    @create_namespace.setter
    def create_namespace(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "create_namespace", value)
    @property
    @pulumi.getter(name="dependencyUpdate")
    def dependency_update(self) -> Optional[pulumi.Input[bool]]:
        """
        Run helm dependency update before installing the chart.
        """
        return pulumi.get(self, "dependency_update")
    @dependency_update.setter
    def dependency_update(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "dependency_update", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Add a custom description
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def devel(self) -> Optional[pulumi.Input[bool]]:
        """
        Use chart development versions, too. Equivalent to version '>0.0.0-0'. If `version` is set, this is ignored.
        """
        return pulumi.get(self, "devel")
    @devel.setter
    def devel(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "devel", value)
    @property
    @pulumi.getter(name="disableCRDHooks")
    def disable_crd_hooks(self) -> Optional[pulumi.Input[bool]]:
        """
        Prevent CRD hooks from running, but run other hooks. See helm install --no-crd-hook
        """
        return pulumi.get(self, "disable_crd_hooks")
    @disable_crd_hooks.setter
    def disable_crd_hooks(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_crd_hooks", value)
    @property
    @pulumi.getter(name="disableOpenapiValidation")
    def disable_openapi_validation(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema
        """
        return pulumi.get(self, "disable_openapi_validation")
    @disable_openapi_validation.setter
    def disable_openapi_validation(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_openapi_validation", value)
    @property
    @pulumi.getter(name="disableWebhooks")
    def disable_webhooks(self) -> Optional[pulumi.Input[bool]]:
        """
        Prevent hooks from running.
        """
        return pulumi.get(self, "disable_webhooks")
    @disable_webhooks.setter
    def disable_webhooks(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_webhooks", value)
    @property
    @pulumi.getter(name="forceUpdate")
    def force_update(self) -> Optional[pulumi.Input[bool]]:
        """
        Force resource update through delete/recreate if needed.
        """
        return pulumi.get(self, "force_update")
    @force_update.setter
    def force_update(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "force_update", value)
    @property
    @pulumi.getter
    def keyring(self) -> Optional[pulumi.Input[str]]:
        """
        Location of public keys used for verification. Used only if `verify` is true
        """
        return pulumi.get(self, "keyring")
    @keyring.setter
    def keyring(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "keyring", value)
    @property
    @pulumi.getter
    def lint(self) -> Optional[pulumi.Input[bool]]:
        """
        Run helm lint when planning.
        """
        return pulumi.get(self, "lint")
    @lint.setter
    def lint(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "lint", value)
    @property
    @pulumi.getter
    def manifest(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        The rendered manifests as JSON. Not yet supported.
        """
        return pulumi.get(self, "manifest")
    @manifest.setter
    def manifest(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "manifest", value)
    @property
    @pulumi.getter(name="maxHistory")
    def max_history(self) -> Optional[pulumi.Input[int]]:
        """
        Limit the maximum number of revisions saved per release. Use 0 for no limit.
        """
        return pulumi.get(self, "max_history")
    @max_history.setter
    def max_history(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_history", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Release name.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        """
        Namespace to install the release into.
        """
        return pulumi.get(self, "namespace")
    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)
    @property
    @pulumi.getter
    def postrender(self) -> Optional[pulumi.Input[str]]:
        """
        Postrender command to run.
        """
        return pulumi.get(self, "postrender")
    @postrender.setter
    def postrender(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "postrender", value)
    @property
    @pulumi.getter(name="recreatePods")
    def recreate_pods(self) -> Optional[pulumi.Input[bool]]:
        """
        Perform pods restart during upgrade/rollback.
        """
        return pulumi.get(self, "recreate_pods")
    @recreate_pods.setter
    def recreate_pods(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "recreate_pods", value)
    @property
    @pulumi.getter(name="renderSubchartNotes")
    def render_subchart_notes(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, render subchart notes along with the parent.
        """
        return pulumi.get(self, "render_subchart_notes")
    @render_subchart_notes.setter
    def render_subchart_notes(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "render_subchart_notes", value)
    @property
    @pulumi.getter
    def replace(self) -> Optional[pulumi.Input[bool]]:
        """
        Re-use the given name, even if that name is already used. This is unsafe in production
        """
        return pulumi.get(self, "replace")
    @replace.setter
    def replace(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "replace", value)
    @property
    @pulumi.getter(name="repositoryOpts")
    def repository_opts(self) -> Optional[pulumi.Input['RepositoryOptsArgs']]:
        """
        Specification defining the Helm chart repository to use.
        """
        return pulumi.get(self, "repository_opts")
    @repository_opts.setter
    def repository_opts(self, value: Optional[pulumi.Input['RepositoryOptsArgs']]):
        pulumi.set(self, "repository_opts", value)
    @property
    @pulumi.getter(name="resetValues")
    def reset_values(self) -> Optional[pulumi.Input[bool]]:
        """
        When upgrading, reset the values to the ones built into the chart.
        """
        return pulumi.get(self, "reset_values")
    @reset_values.setter
    def reset_values(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "reset_values", value)
    @property
    @pulumi.getter(name="resourceNames")
    def resource_names(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input[str]]]]]]:
        """
        Names of resources created by the release grouped by "kind/version".
        """
        return pulumi.get(self, "resource_names")
    @resource_names.setter
    def resource_names(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input[str]]]]]]):
        pulumi.set(self, "resource_names", value)
    @property
    @pulumi.getter(name="reuseValues")
    def reuse_values(self) -> Optional[pulumi.Input[bool]]:
        """
        When upgrading, reuse the last release's values and merge in any overrides. If 'resetValues' is specified, this is ignored
        """
        return pulumi.get(self, "reuse_values")
    @reuse_values.setter
    def reuse_values(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "reuse_values", value)
    @property
    @pulumi.getter(name="skipAwait")
    def skip_await(self) -> Optional[pulumi.Input[bool]]:
        """
        By default, the provider waits until all resources are in a ready state before marking the release as successful. Setting this to true will skip such await logic.
        """
        return pulumi.get(self, "skip_await")
    @skip_await.setter
    def skip_await(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "skip_await", value)
    @property
    @pulumi.getter(name="skipCrds")
    def skip_crds(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, no CRDs will be installed. By default, CRDs are installed if not already present.
        """
        return pulumi.get(self, "skip_crds")
    @skip_crds.setter
    def skip_crds(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "skip_crds", value)
    @property
    @pulumi.getter
    def timeout(self) -> Optional[pulumi.Input[int]]:
        """
        Time in seconds to wait for any individual kubernetes operation.
        """
        return pulumi.get(self, "timeout")
    @timeout.setter
    def timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "timeout", value)
    @property
    @pulumi.getter(name="valueYamlFiles")
    def value_yaml_files(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]]]:
        """
        List of assets (raw yaml files). Content is read and merged with values. Not yet supported.
        """
        return pulumi.get(self, "value_yaml_files")
    @value_yaml_files.setter
    def value_yaml_files(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]]]):
        pulumi.set(self, "value_yaml_files", value)
    @property
    @pulumi.getter
    def values(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Custom values set for the release.
        """
        return pulumi.get(self, "values")
    @values.setter
    def values(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "values", value)
    @property
    @pulumi.getter
    def verify(self) -> Optional[pulumi.Input[bool]]:
        """
        Verify the package before installing it.
        """
        return pulumi.get(self, "verify")
    @verify.setter
    def verify(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "verify", value)
    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """
        Specify the exact chart version to install. If this is not specified, the latest version is installed.
        """
        return pulumi.get(self, "version")
    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
    @property
    @pulumi.getter(name="waitForJobs")
    def wait_for_jobs(self) -> Optional[pulumi.Input[bool]]:
        """
        Will wait until all Jobs have been completed before marking the release as successful. This is ignored if `skipAwait` is enabled.
        """
        return pulumi.get(self, "wait_for_jobs")
    @wait_for_jobs.setter
    def wait_for_jobs(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "wait_for_jobs", value)
# NOTE(review): machine-generated Pulumi input type. @pulumi.input_type
# introspects the __init__ signature and the decorated getters below to build
# the wire schema, so the structure must stay exactly as generated - prefer
# regenerating over hand-editing.
@pulumi.input_type
class RepositoryOptsArgs:
    def __init__(__self__, *,
                 ca_file: Optional[pulumi.Input[str]] = None,
                 cert_file: Optional[pulumi.Input[str]] = None,
                 key_file: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 repo: Optional[pulumi.Input[str]] = None,
                 username: Optional[pulumi.Input[str]] = None):
        """
        Specification defining the Helm chart repository to use.
        :param pulumi.Input[str] ca_file: The Repository's CA File
        :param pulumi.Input[str] cert_file: The repository's cert file
        :param pulumi.Input[str] key_file: The repository's cert key file
        :param pulumi.Input[str] password: Password for HTTP basic authentication
        :param pulumi.Input[str] repo: Repository where to locate the requested chart. If is a URL the chart is installed without installing the repository.
        :param pulumi.Input[str] username: Username for HTTP basic authentication
        """
        # Only inputs that were actually supplied are set; omitted inputs stay absent.
        if ca_file is not None:
            pulumi.set(__self__, "ca_file", ca_file)
        if cert_file is not None:
            pulumi.set(__self__, "cert_file", cert_file)
        if key_file is not None:
            pulumi.set(__self__, "key_file", key_file)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if repo is not None:
            pulumi.set(__self__, "repo", repo)
        if username is not None:
            pulumi.set(__self__, "username", username)
    @property
    @pulumi.getter(name="caFile")
    def ca_file(self) -> Optional[pulumi.Input[str]]:
        """
        The Repository's CA File
        """
        return pulumi.get(self, "ca_file")
    @ca_file.setter
    def ca_file(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ca_file", value)
    @property
    @pulumi.getter(name="certFile")
    def cert_file(self) -> Optional[pulumi.Input[str]]:
        """
        The repository's cert file
        """
        return pulumi.get(self, "cert_file")
    @cert_file.setter
    def cert_file(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cert_file", value)
    @property
    @pulumi.getter(name="keyFile")
    def key_file(self) -> Optional[pulumi.Input[str]]:
        """
        The repository's cert key file
        """
        return pulumi.get(self, "key_file")
    @key_file.setter
    def key_file(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_file", value)
    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        Password for HTTP basic authentication
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter
    def repo(self) -> Optional[pulumi.Input[str]]:
        """
        Repository where to locate the requested chart. If is a URL the chart is installed without installing the repository.
        """
        return pulumi.get(self, "repo")
    @repo.setter
    def repo(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "repo", value)
    @property
    @pulumi.getter
    def username(self) -> Optional[pulumi.Input[str]]:
        """
        Username for HTTP basic authentication
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "username", value)
| 44.522944 | 546 | 0.675118 | 22,526 | 191,137 | 5.554115 | 0.038666 | 0.120276 | 0.132577 | 0.056397 | 0.85927 | 0.769463 | 0.72531 | 0.675882 | 0.660416 | 0.617806 | 0 | 0.00168 | 0.205784 | 191,137 | 4,292 | 547 | 44.533318 | 0.822488 | 0.170511 | 0 | 0.5589 | 1 | 0 | 0.133228 | 0.061564 | 0 | 0 | 0 | 0 | 0 | 1 | 0.202265 | false | 0.002589 | 0.001942 | 0.054693 | 0.312298 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
2cadc79c172c8eae5ebb8268a4fce6df032492ff | 35 | py | Python | RESTfacebook/__init__.py | JoeyDP/REST-Facebook | 6035f9d66e98020eb601b437bd5f559eccd37c17 | [
"MIT"
] | null | null | null | RESTfacebook/__init__.py | JoeyDP/REST-Facebook | 6035f9d66e98020eb601b437bd5f559eccd37c17 | [
"MIT"
] | null | null | null | RESTfacebook/__init__.py | JoeyDP/REST-Facebook | 6035f9d66e98020eb601b437bd5f559eccd37c17 | [
"MIT"
] | null | null | null |
from .facebook import FacebookAPI
| 11.666667 | 33 | 0.828571 | 4 | 35 | 7.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 35 | 2 | 34 | 17.5 | 0.966667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e2ec198a4d03cfb5ee8bf6c201a6c08ed4395cd3 | 20,930 | py | Python | qark/modules/findBroadcasts.py | pragyan1994/Jenkins_Integration | 747382d5c6d5cd835d0c7c3324956e8e352876ac | [
"Apache-2.0"
] | 1 | 2017-12-02T21:34:26.000Z | 2017-12-02T21:34:26.000Z | qark/modules/findBroadcasts.py | pragyan1994/Jenkins_Integration | 747382d5c6d5cd835d0c7c3324956e8e352876ac | [
"Apache-2.0"
] | null | null | null | qark/modules/findBroadcasts.py | pragyan1994/Jenkins_Integration | 747382d5c6d5cd835d0c7c3324956e8e352876ac | [
"Apache-2.0"
] | 1 | 2018-05-12T16:01:58.000Z | 2018-05-12T16:01:58.000Z | from __future__ import absolute_import
'''Copyright 2015 LinkedIn Corp. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.'''
import re
import logging
import lib.plyj.model as m
import lib.plyj.parser as plyj
from modules import common
from modules import report
from modules.report import ReportIssue
from modules.common import Severity, ReportIssue
from modules.createExploit import ExploitType
from lib.pubsub import pub
from modules.common import terminalPrint
# Module-level scan state shared by main() and the helper functions below.
# main() re-points current_file/tree for every Java file it scans.
common.logger = logging.getLogger()   # root logger handed to the shared `common` module
logger = logging.getLogger(__name__)  # module-local logger
parser = plyj.Parser()                # single plyj parser instance, reused for every file
current_file = ''                     # path of the Java file currently being scanned
tree = ''                             # plyj parse tree of current_file ('' until first parse)
# BUG FIX: this flag used to be initialized to the *string* 'False', which is
# truthy; it only worked because callers compare the value with `== True`.
# Use a real boolean so plain truthiness tests also behave correctly.
importFound = False
def main(queue):
    """Scan every collected Java source file for insecure broadcast usage.

    Each file in ``common.java_files`` is parsed with plyj; the resulting
    class declarations are walked by ``recursive_broadcast_finder``, which
    appends ReportIssue/terminalPrint findings to a shared list. The finished
    list is placed on ``queue`` so the caller (QARK's module runner) can
    collect it.

    :param queue: queue-like object with a ``put`` method; receives the
        results list when the scan completes.
    """
    global current_file
    global parser
    global tree
    results = []
    count = 0
    common.logger.debug("Checking for any broadcasts sent from this app......")
    for j in common.java_files:
        count = count + 1
        pub.sendMessage('progress', bar="Broadcast issues",
                        percent=round(count * 100 / len(common.java_files)))
        current_file = j
        try:
            tree = parser.parse_file(j)
            # BUG FIX: this used to read `if type(tree) is not None:`, which is
            # always true (type() never returns None), so parse failures were
            # silently skipped and the "Unable to create tree" branch was dead.
            if tree is not None:
                if hasattr(tree, 'type_declarations'):
                    for type_decl in tree.type_declarations:
                        if type(type_decl) is m.ClassDeclaration:
                            for t in type_decl.body:
                                try:
                                    recursive_broadcast_finder(t, results)
                                except Exception as e:
                                    # Best effort: record the file and move on.
                                    common.parsingerrors.add(str(j))
                                    common.logger.debug("Unable to process recursive_broadcast_finder in findBroadcasts.py: " + str(e))
                        elif type(type_decl) is list:
                            for y in type_decl:
                                recursive_broadcast_finder(y, results)
                        elif hasattr(type_decl, '_fields'):
                            # Unknown plyj node type: walk every field it declares.
                            for d in type_decl._fields:
                                recursive_broadcast_finder(getattr(type_decl, d), results)
            else:
                common.logger.debug("Unable to create tree for " + str(j))
        except Exception as e:
            # Never abort the whole scan for one bad file; log and continue.
            common.logger.debug("Tree exception during broadcast processing: " + str(e))
            common.parsingerrors.add(str(j))
    queue.put(results)
    return
def local_broadcast_manager_imported():
    """Return True if the current file's parse tree imports LocalBroadcastManager.

    ``sendBroadcast`` on an ``android.support.v4.content.LocalBroadcastManager``
    is app-local and not insecure, so callers use this check to suppress
    broadcast findings. Only the import list of the module-global ``tree`` is
    consulted; any wildcard import that *could* cover the class counts as a
    match, so the check is deliberately over-broad (favoring false negatives
    on findings over false positives).

    BUG FIX: the original cached its verdict in the module-global
    ``importFound`` and never reset it between files - once any scanned file
    imported LocalBroadcastManager, every subsequent file was treated as if it
    did too, masking real findings. The verdict is now computed fresh on every
    call and returned as a plain bool.
    """
    # Exact import strings (including wildcard forms) that make
    # LocalBroadcastManager resolvable in the scanned file.
    lbm_imports = (
        "android.support.v4.content.LocalBroadcastManager",
        "android.support.v4.content.*",
        "android.support.v4.*",
        "android.support.*",
        "android.*",
    )
    for imp_decl in tree.import_declarations:
        if str(imp_decl.name.value) in lbm_imports:
            return True
    return False
def recursive_broadcast_finder(t,results):
if type(t) is m.MethodDeclaration:
if str(t.name) == 'sendBroadcast':
common.logger.debug("It appears the sendBroadcast method may be overridden in this class. The following findings for this class may be false positives")
if str(t.name) == 'sendBroadcastAsUser':
common.logger.debug("It appears the sendBroadcastAsUser method may be overridden in this class. The following findings for this class may be false positives")
if str(t.name) == 'sendOrderedBroadcast':
common.logger.debug("It appears the sendOrderedBroadcast method may be overridden in this class. The following findings for this class may be false positives")
if str(t.name) == 'sendOrderedBroadcastAsUser':
common.logger.debug("It appears the sendOrderedBroadcastAsUser method may be overridden in this class. The following findings for this class may be false positives")
if str(t.name) == 'sendStickyBroadcast':
common.logger.debug("It appears the sendStickyBroadcast method may be overridden in this class. The following findings for this class may be false positives")
if str(t.name) == 'sendStickyBroadcastAsUser':
common.logger.debug("It appears the sendStickyBroadcastAsUser method may be overridden in this class. The following findings for this class may be false positives")
if str(t.name) == 'sendStickyOrderedBroadcast':
common.logger.debug("It appears the sendStickyOrderedBroadcast method may be overridden in this class. The following findings for this class may be false positives")
if str(t.name) == 'sendStickyOrderedBroadcastAsUser':
common.logger.debug("It appears the sendStickyOrderedBroadcastAsUser method may be overridden in this class. The following findings for this class may be false positives")
if type(t) is m.MethodInvocation:
if str(t.name) == 'sendBroadcast':
if len(t.arguments)==1:
#We need to ensure this isn't a local broadcast
#TODO - There is a lot more we need to do to fully qualify this, but should be good enough for now
if local_broadcast_manager_imported()==True:
common.logger.debug(tree)
else:
report.write_badger("manifest-issues", modules.common.Severity.INFO, "NO IMPORT")
common.logger.debug("FOUND A sendBroadcast")
issue = ReportIssue()
issue.setCategory(ExploitType.BROADCAST_INTENT)
issue.setDetails("A broadcast is sent from this class: " + str(current_file) + ", which does not specify the receiverPermission. This means any application on the device can receive this broadcast. You should investigate this for potential data leakage.")
issue.setFile(str(current_file))
issue.setSeverity(Severity.WARNING)
results.append(issue)
issue = terminalPrint()
issue.setLevel(Severity.WARNING)
issue.setData("A broadcast is sent from this class: " + str(current_file) + ", which does not specify the receiverPermission. This means any application on the device can receive this broadcast. You should investigate this for potential data leakage.")
results.append(issue)
elif len(t.arguments)==2:
if common.minSdkVersion<21:
issue = ReportIssue()
issue.setCategory(ExploitType.BROADCAST_INTENT)
issue.setDetails("A broadcast is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but may still be vulnerable to interception, due to the permission squatting vulnerability in API levels before 21. This means any application, installed prior to the expected receiver(s) on the device can potentially receive this broadcast. You should investigate this for potential data leakage.")
issue.setFile(str(current_file))
issue.setSeverity(Severity.WARNING)
results.append(issue)
issue = terminalPrint()
issue.setLevel(Severity.WARNING)
issue.setData("A broadcast is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but may still be vulnerable to interception, due to the permission squatting vulnerability in API levels before 21. This means any application, installed prior to the expected receiver(s) on the device can potentially receive this broadcast. You should investigate this for potential data leakage.")
results.append(issue)
else:
issue = ReportIssue()
issue.setCategory(ExploitType.BROADCAST_INTENT)
issue.setDetails("A broadcast is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but depending on the protection level of the permission (on the receiving app side), may still be vulnerable to interception, if the protection level of the permission is not set to signature or signatureOrSystem. You should investigate this for potential data leakage.")
issue.setFile(str(current_file))
issue.setSeverity(Severity.WARNING)
results.append(issue)
issue = terminalPrint()
issue.setLevel(Severity.WARNING)
issue.setData("A broadcast is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but depending on the protection level of the permission (on the receiving app side), may still be vulnerable to interception, if the protection level of the permission is not set to signature or signatureOrSystem. You should investigate this for potential data leakage.")
results.append(issue)
elif str(t.name) == 'sendBroadcastAsUser':
if len(t.arguments)==2:
issue = ReportIssue()
issue.setCategory(ExploitType.BROADCAST_INTENT)
issue.setDetails("A broadcast, as a specific user, is sent from this class: " + str(current_file) + ", which does not specify the receiverPermission. This means any application on the device can receive this broadcast. You should investigate this for potential data leakage.")
issue.setFile(str(current_file))
issue.setSeverity(Severity.WARNING)
results.append(issue)
issue = terminalPrint()
issue.setLevel(Severity.WARNING)
issue.setData("A broadcast, as a specific user, is sent from this class: " + str(current_file) + ", which does not specify the receiverPermission. This means any application on the device can receive this broadcast. You should investigate this for potential data leakage.")
results.append(issue)
elif len(t.arguments)==3:
if common.minSdkVersion<21:
issue = ReportIssue()
issue.setCategory(ExploitType.BROADCAST_INTENT)
issue.setDetails("A broadcast, as a specific user, is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but may still be vulnerable to interception, due to the permission squatting vulnerability in API levels before 21. This means any application, installed prior to the expected receiver(s) on the device can potentially receive this broadcast. You should investigate this for potential data leakage.")
issue.setFile(str(current_file))
issue.setSeverity(Severity.WARNING)
results.append(issue)
issue = terminalPrint()
issue.setLevel(Severity.WARNING)
issue.setData("A broadcast, as a specific user, is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but may still be vulnerable to interception, due to the permission squatting vulnerability in API levels before 21. This means any application, installed prior to the expected receiver(s) on the device can potentially receive this broadcast. You should investigate this for potential data leakage.")
results.append(issue)
else:
issue = ReportIssue()
issue.setCategory(ExploitType.BROADCAST_INTENT)
issue.setDetails("A broadcast, as a specific user, is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but depending on the protection level of the permission (on the receiving app side), may still be vulnerable to interception, if the protection level of the permission is not set to signature or signatureOrSystem. You should investigate this for potential data leakage.")
issue.setFile(str(current_file))
issue.setSeverity(Severity.WARNING)
results.append(issue)
issue = terminalPrint()
issue.setLevel(Severity.WARNING)
issue.setData("A broadcast, as a specific user, is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but depending on the protection level of the permission (on the receiving app side), may still be vulnerable to interception, if the protection level of the permission is not set to signature or signatureOrSystem. You should investigate this for potential data leakage.")
results.append(issue)
elif str(t.name) == 'sendOrderedBroadcast':
if ((len(t.arguments)==2) or (len(t.arguments)==7)):
if common.minSdkVersion<21:
issue = ReportIssue()
issue.setCategory(ExploitType.BROADCAST_INTENT)
issue.setDetails("An ordered broadcast, as a specific user, is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but may still be vulnerable to interception, due to the permission squatting vulnerability in API levels before 21. This means any application, installed prior to the expected receiver(s) on the device can potentially receive this broadcast. You should investigate this for potential data leakage.")
issue.setFile(str(current_file))
issue.setSeverity(Severity.WARNING)
results.append(issue)
issue = terminalPrint()
issue.setLevel(Severity.WARNING)
issue.setData("An ordered broadcast, as a specific user, is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but may still be vulnerable to interception, due to the permission squatting vulnerability in API levels before 21. This means any application, installed prior to the expected receiver(s) on the device can potentially receive this broadcast. You should investigate this for potential data leakage.")
results.append(issue)
else:
issue = ReportIssue()
issue.setCategory(ExploitType.BROADCAST_INTENT)
issue.setDetails("An ordered broadcast, as a specific user, is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but may still be vulnerable to interception, due to the permission squatting vulnerability in API levels before 21. This means any application, installed prior to the expected receiver(s) on the device can potentially receive this broadcast. You should investigate this for potential data leakage.")
issue.setFile(str(current_file))
issue.setSeverity(Severity.WARNING)
results.append(issue)
issue = terminalPrint()
issue.setLevel(Severity.WARNING)
issue.setData("An ordered broadcast, as a specific user, is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but may still be vulnerable to interception, due to the permission squatting vulnerability in API levels before 21. This means any application, installed prior to the expected receiver(s) on the device can potentially receive this broadcast. You should investigate this for potential data leakage.")
results.append(issue)
elif str(t.name) == 'sendOrderedBroadcastAsUser':
if len(t.arguments)==7:
if common.minSdkVersion<21:
issue = ReportIssue()
issue.setCategory(ExploitType.BROADCAST_INTENT)
issue.setDetails("An ordered broadcast, as a specific user, is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but may still be vulnerable to interception, due to the permission squatting vulnerability in API levels before 21. This means any application, installed prior to the expected receiver(s) on the device can potentially receive this broadcast. You should investigate this for potential data leakage.")
issue.setFile(str(current_file))
issue.setSeverity(Severity.WARNING)
results.append(issue)
issue = terminalPrint()
issue.setLevel(Severity.WARNING)
issue.setData("An ordered broadcast, as a specific user, is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but may still be vulnerable to interception, due to the permission squatting vulnerability in API levels before 21. This means any application, installed prior to the expected receiver(s) on the device can potentially receive this broadcast. You should investigate this for potential data leakage.")
results.append(issue)
else:
issue = ReportIssue()
issue.setCategory(ExploitType.BROADCAST_INTENT)
issue.setDetails("An ordered broadcast, as a specific user, is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but depending on the protection level of the permission (on the receiving app side), may still be vulnerable to interception, if the protection level of the permission is not set to signature or signatureOrSystem. You should investigate this for potential data leakage.")
issue.setFile(str(current_file))
issue.setSeverity(Severity.WARNING)
results.append(issue)
issue = terminalPrint()
issue.setLevel(Severity.WARNING)
issue.setData("An ordered broadcast, as a specific user, is sent from this class: " + str(current_file) + ", which specifies the receiverPermission, but depending on the protection level of the permission (on the receiving app side), may still be vulnerable to interception, if the protection level of the permission is not set to signature or signatureOrSystem. You should investigate this for potential data leakage.")
results.append(issue)
elif str(t.name) == 'sendStickyBroadcast':
issue = ReportIssue()
issue.setCategory(ExploitType.BROADCAST_INTENT)
issue.setDetails("A sticky broadcast is sent from this class: " + str(current_file) + ". These should not be used, as they provide no security (anyone can access them), no protection (anyone can modify them), and many other problems. For more info: http://developer.android.com/reference/android/content/Context.html")
issue.setFile(str(current_file))
issue.setSeverity(Severity.VULNERABILITY)
results.append(issue)
issue = terminalPrint()
issue.setLevel(Severity.VULNERABILITY)
issue.setData("A sticky broadcast is sent from this class: " + str(current_file) + ". These should not be used, as they provide no security (anyone can access them), no protection (anyone can modify them), and many other problems. For more info: http://developer.android.com/reference/android/content/Context.html")
results.append(issue)
elif str(t.name) == 'sendStickyBroadcastAsUser':
issue = ReportIssue()
issue.setCategory(ExploitType.BROADCAST_INTENT)
issue.setDetails("A sticky user broadcast is sent from this class: " + str(current_file) + ". These should not be used, as they provide no security (anyone can access them), no protection (anyone can modify them), and many other problems. For more info: http://developer.android.com/reference/android/content/Context.html")
issue.setFile(str(current_file))
issue.setSeverity(Severity.VULNERABILITY)
results.append(issue)
issue = terminalPrint()
issue.setLevel(Severity.VULNERABILITY)
issue.setData("A sticky user broadcast is sent from this class: " + str(current_file) + ". These should not be used, as they provide no security (anyone can access them), no protection (anyone can modify them), and many other problems. For more info: http://developer.android.com/reference/android/content/Context.html")
results.append(issue)
elif str(t.name) == 'sendStickyOrderedBroadcast':
issue = ReportIssue()
issue.setCategory(ExploitType.BROADCAST_INTENT)
issue.setDetails("A sticky ordered broadcast is sent from this class: " + str(current_file) + ". These should not be used, as they provide no security (anyone can access them), no protection (anyone can modify them), and many other problems. For more info: http://developer.android.com/reference/android/content/Context.html")
issue.setFile(str(current_file))
issue.setSeverity(Severity.VULNERABILITY)
results.append(issue)
issue = terminalPrint()
issue.setLevel(Severity.VULNERABILITY)
issue.setData("A sticky ordered broadcast is sent from this class: " + str(current_file) + ". These should not be used, as they provide no security (anyone can access them), no protection (anyone can modify them), and many other problems. For more info: http://developer.android.com/reference/android/content/Context.html")
results.append(issue)
elif str(t.name) == 'sendStickyOrderedBroadcastAsUser':
issue = ReportIssue()
issue.setCategory(ExploitType.BROADCAST_INTENT)
issue.setDetails("A sticky ordered user broadcast is sent from this class: " + str(current_file) + ". These should not be used, as they provide no security (anyone can access them), no protection (anyone can modify them), and many other problems. For more info: http://developer.android.com/reference/android/content/Context.html")
issue.setFile(str(current_file))
issue.setSeverity(Severity.VULNERABILITY)
results.append(issue)
issue = terminalPrint()
issue.setLevel(Severity.VULNERABILITY)
issue.setData("A sticky ordered user broadcast is sent from this class: " + str(current_file) + ". These should not be used, as they provide no security (anyone can access them), no protection (anyone can modify them), and many other problems. For more info: http://developer.android.com/reference/android/content/Context.html")
results.append(issue)
elif hasattr(t,'_fields'):
for g in t._fields:
recursive_broadcast_finder(getattr(t,g),results)
elif type(t) is list:
for l in t:
recursive_broadcast_finder(l,results)
elif hasattr(t,'_fields'):
for f in t._fields:
if type(getattr(t,f)) is not str:
recursive_broadcast_finder(getattr(t,f),results)
return
| 68.175896 | 456 | 0.760965 | 2,867 | 20,930 | 5.51308 | 0.099407 | 0.031317 | 0.037201 | 0.024801 | 0.826901 | 0.801847 | 0.783753 | 0.783753 | 0.783753 | 0.777806 | 0 | 0.002893 | 0.157668 | 20,930 | 306 | 457 | 68.398693 | 0.893647 | 0.017678 | 0 | 0.552632 | 0 | 0.135338 | 0.556518 | 0.021353 | 0 | 0 | 0 | 0.003268 | 0 | 1 | 0.011278 | false | 0 | 0.090226 | 0 | 0.112782 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
393b5692033aa5a50648f05a403cf56b3d6666cc | 9,522 | py | Python | tests/model/test_stochastic.py | ycguo028/zhusuan | 244536d93c55e486a3587e53229f0a7e1b19bef0 | [
"MIT"
] | 4 | 2017-05-23T20:18:41.000Z | 2020-03-03T15:00:53.000Z | tests/model/test_stochastic.py | ycguo028/zhusuan | 244536d93c55e486a3587e53229f0a7e1b19bef0 | [
"MIT"
] | null | null | null | tests/model/test_stochastic.py | ycguo028/zhusuan | 244536d93c55e486a3587e53229f0a7e1b19bef0 | [
"MIT"
] | 2 | 2018-11-27T02:43:22.000Z | 2019-11-23T18:27:32.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import tensorflow as tf
from zhusuan.model.stochastic import *
from zhusuan.model.base import BayesianNet
from zhusuan.model.utils import get_backward_ops
class TestNormal(tf.test.TestCase):
    def test_Normal(self):
        """A Normal node's samples must depend on its parameters and sample
        count; its log-density must depend on the parameters and grouping."""
        with BayesianNet():
            mean = tf.zeros([2, 3])
            logstd = tf.zeros([2, 3])
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_event_ndims = tf.placeholder(tf.int32, shape=[])
            a = Normal('a', mean, logstd, n_samples, group_event_ndims)

            def check_deps(target, inputs):
                # every listed input op must appear upstream of `target`
                upstream = set(get_backward_ops(target))
                for dep in inputs:
                    self.assertTrue(dep.op in upstream)

            check_deps(a.tensor, (mean, logstd, n_samples))
            check_deps(a.log_prob(np.ones([2, 3])),
                       (mean, logstd, group_event_ndims))
class TestBernoulli(tf.test.TestCase):
    def test_Bernoulli(self):
        """Bernoulli samples depend on logits/n_samples; log_prob on
        logits/group_event_ndims."""
        with BayesianNet():
            logits = tf.zeros([2, 3])
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_event_ndims = tf.placeholder(tf.int32, shape=[])
            a = Bernoulli('a', logits, n_samples, group_event_ndims)

            def check_deps(target, inputs):
                # every listed input op must appear upstream of `target`
                upstream = set(get_backward_ops(target))
                for dep in inputs:
                    self.assertTrue(dep.op in upstream)

            check_deps(a.tensor, (logits, n_samples))
            check_deps(a.log_prob(np.ones([2, 3])),
                       (logits, group_event_ndims))
class TestCategorical(tf.test.TestCase):
    def test_Discrete(self):
        """Categorical samples depend on logits/n_samples; log_prob on
        logits/group_event_ndims."""
        with BayesianNet():
            logits = tf.zeros([2, 3])
            n_samples = tf.placeholder(tf.int32, shape=())
            group_event_ndims = tf.placeholder(tf.int32, shape=[])
            a = Categorical('a', logits, n_samples, group_event_ndims)

            def check_deps(target, inputs):
                # every listed input op must appear upstream of `target`
                upstream = set(get_backward_ops(target))
                for dep in inputs:
                    self.assertTrue(dep.op in upstream)

            check_deps(a.tensor, (logits, n_samples))
            check_deps(a.log_prob(np.array([0, 1])),
                       (logits, group_event_ndims))
class TestUniform(tf.test.TestCase):
    def test_Uniform(self):
        """Uniform samples depend on minval/maxval/n_samples; log_prob on
        minval/maxval/group_event_ndims."""
        with BayesianNet():
            minval = tf.zeros([2, 3])
            maxval = tf.ones([2, 3])
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_event_ndims = tf.placeholder(tf.int32, shape=[])
            a = Uniform('a', minval, maxval, n_samples, group_event_ndims)

            def check_deps(target, inputs):
                # every listed input op must appear upstream of `target`
                upstream = set(get_backward_ops(target))
                for dep in inputs:
                    self.assertTrue(dep.op in upstream)

            check_deps(a.tensor, (minval, maxval, n_samples))
            check_deps(a.log_prob(np.zeros([2, 3])),
                       (minval, maxval, group_event_ndims))
class TestGamma(tf.test.TestCase):
    def test_Gamma(self):
        """Gamma samples depend on alpha/beta/n_samples; log_prob on
        alpha/beta/group_event_ndims."""
        with BayesianNet():
            alpha = tf.ones([2, 3])
            beta = tf.ones([2, 3])
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_event_ndims = tf.placeholder(tf.int32, shape=[])
            a = Gamma('a', alpha, beta, n_samples, group_event_ndims)

            def check_deps(target, inputs):
                # every listed input op must appear upstream of `target`
                upstream = set(get_backward_ops(target))
                for dep in inputs:
                    self.assertTrue(dep.op in upstream)

            check_deps(a.tensor, (alpha, beta, n_samples))
            check_deps(a.log_prob(np.ones([2, 3])),
                       (alpha, beta, group_event_ndims))
class TestBeta(tf.test.TestCase):
    def test_Beta(self):
        """Beta samples depend on alpha/beta/n_samples; log_prob on
        alpha/beta/group_event_ndims."""
        with BayesianNet():
            alpha = tf.ones([2, 3])
            beta = tf.ones([2, 3])
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_event_ndims = tf.placeholder(tf.int32, shape=[])
            a = Beta('a', alpha, beta, n_samples, group_event_ndims)

            def check_deps(target, inputs):
                # every listed input op must appear upstream of `target`
                upstream = set(get_backward_ops(target))
                for dep in inputs:
                    self.assertTrue(dep.op in upstream)

            check_deps(a.tensor, (alpha, beta, n_samples))
            # 0.5 lies in the Beta support (0, 1)
            check_deps(a.log_prob(np.ones([2, 3]) * 0.5),
                       (alpha, beta, group_event_ndims))
class TestPoisson(tf.test.TestCase):
    def test_Poisson(self):
        """Poisson samples depend on rate/n_samples; log_prob on
        rate/group_event_ndims."""
        with BayesianNet():
            rate = tf.ones([2, 3])
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_event_ndims = tf.placeholder(tf.int32, shape=[])
            a = Poisson('a', rate, n_samples, group_event_ndims)

            def check_deps(target, inputs):
                # every listed input op must appear upstream of `target`
                upstream = set(get_backward_ops(target))
                for dep in inputs:
                    self.assertTrue(dep.op in upstream)

            check_deps(a.tensor, (rate, n_samples))
            check_deps(a.log_prob(np.ones([2, 3], dtype=np.int32)),
                       (rate, group_event_ndims))
class TestBinomial(tf.test.TestCase):
    def test_Binomial(self):
        """Binomial samples depend on logits/n_experiments/n_samples;
        log_prob on logits/n_experiments/group_event_ndims."""
        with BayesianNet():
            logits = tf.zeros([2, 3])
            n_experiments = tf.placeholder(tf.int32, shape=[])
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_event_ndims = tf.placeholder(tf.int32, shape=[])
            a = Binomial('a', logits, n_experiments, n_samples,
                         group_event_ndims)

            def check_deps(target, inputs):
                # every listed input op must appear upstream of `target`
                upstream = set(get_backward_ops(target))
                for dep in inputs:
                    self.assertTrue(dep.op in upstream)

            check_deps(a.tensor, (logits, n_experiments, n_samples))
            check_deps(a.log_prob(np.ones([2, 3], dtype=np.int32)),
                       (logits, n_experiments, group_event_ndims))
class TestMultinomial(tf.test.TestCase):
    def test_Multinomial(self):
        """Multinomial samples depend on logits/n_experiments/n_samples;
        log_prob on logits/n_experiments/group_event_ndims."""
        with BayesianNet():
            logits = tf.ones([2, 3])
            n_experiments = tf.placeholder(tf.int32, shape=[])
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_event_ndims = tf.placeholder(tf.int32, shape=[])
            a = Multinomial('a', logits, n_experiments, n_samples,
                            group_event_ndims)

            def check_deps(target, inputs):
                # every listed input op must appear upstream of `target`
                upstream = set(get_backward_ops(target))
                for dep in inputs:
                    self.assertTrue(dep.op in upstream)

            check_deps(a.tensor, (logits, n_experiments, n_samples))
            check_deps(a.log_prob(np.ones([2, 3], dtype=np.int32)),
                       (logits, n_experiments, group_event_ndims))
class TestOnehotCategorical(tf.test.TestCase):
    def test_OnehotCategorical(self):
        """OnehotCategorical samples depend on logits/n_samples; log_prob on
        logits/group_event_ndims."""
        with BayesianNet():
            logits = tf.ones([2, 3])
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_event_ndims = tf.placeholder(tf.int32, shape=[])
            a = OnehotCategorical('a', logits, n_samples, group_event_ndims)

            def check_deps(target, inputs):
                # every listed input op must appear upstream of `target`
                upstream = set(get_backward_ops(target))
                for dep in inputs:
                    self.assertTrue(dep.op in upstream)

            check_deps(a.tensor, (logits, n_samples))
            # observations are one-hot encoded class indices
            check_deps(a.log_prob(tf.one_hot([0, 2], 3, dtype=tf.int32)),
                       (logits, group_event_ndims))
class TestDirichlet(tf.test.TestCase):
    def test_Dirichlet(self):
        """Dirichlet samples depend on alpha/n_samples; log_prob on
        alpha/group_event_ndims."""
        with BayesianNet():
            alpha = tf.ones([2, 3])
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_event_ndims = tf.placeholder(tf.int32, shape=[])
            a = Dirichlet('a', alpha, n_samples, group_event_ndims)

            def check_deps(target, inputs):
                # every listed input op must appear upstream of `target`
                upstream = set(get_backward_ops(target))
                for dep in inputs:
                    self.assertTrue(dep.op in upstream)

            check_deps(a.tensor, (alpha, n_samples))
            # observations lie on the simplex (rows sum to 1)
            check_deps(a.log_prob(np.array([[0.2, 0.3, 0.5], [0.1, 0.7, 0.2]])),
                       (alpha, group_event_ndims))
class TestInverseGamma(tf.test.TestCase):
    def test_InverseGamma(self):
        """InverseGamma samples depend on alpha/beta/n_samples; log_prob on
        alpha/beta/group_event_ndims."""
        with BayesianNet():
            alpha = tf.ones([2, 3])
            beta = tf.ones([2, 3])
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_event_ndims = tf.placeholder(tf.int32, shape=[])
            a = InverseGamma('a', alpha, beta, n_samples, group_event_ndims)

            def check_deps(target, inputs):
                # every listed input op must appear upstream of `target`
                upstream = set(get_backward_ops(target))
                for dep in inputs:
                    self.assertTrue(dep.op in upstream)

            check_deps(a.tensor, (alpha, beta, n_samples))
            check_deps(a.log_prob(np.ones([2, 3])),
                       (alpha, beta, group_event_ndims))
class TestLaplace(tf.test.TestCase):
    def test_Laplace(self):
        """Laplace samples depend on loc/scale/n_samples; log_prob on
        loc/scale/group_event_ndims."""
        with BayesianNet():
            loc = tf.zeros([2, 3])
            scale = tf.ones([2, 3])
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_event_ndims = tf.placeholder(tf.int32, shape=[])
            a = Laplace('a', loc, scale, n_samples, group_event_ndims)

            def check_deps(target, inputs):
                # every listed input op must appear upstream of `target`
                upstream = set(get_backward_ops(target))
                for dep in inputs:
                    self.assertTrue(dep.op in upstream)

            check_deps(a.tensor, (loc, scale, n_samples))
            check_deps(a.log_prob(np.ones([2, 3])),
                       (loc, scale, group_event_ndims))
| 41.043103 | 76 | 0.607961 | 1,370 | 9,522 | 3.989051 | 0.075182 | 0.03806 | 0.107045 | 0.10247 | 0.840439 | 0.779506 | 0.779506 | 0.779506 | 0.775114 | 0.759012 | 0 | 0.020356 | 0.267381 | 9,522 | 231 | 77 | 41.220779 | 0.763045 | 0.004411 | 0 | 0.65 | 0 | 0 | 0.001372 | 0 | 0 | 0 | 0 | 0 | 0.13 | 1 | 0.065 | false | 0 | 0.04 | 0 | 0.17 | 0.005 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
1a5960fec38fcaf4733d372d77761c72c65e8280 | 21 | py | Python | src/services/f12020/__init__.py | jordansilva/raspberry-f1-dashboard | 96446a348d036a75f4699bab4459eabec16705f8 | [
"Apache-2.0"
] | null | null | null | src/services/f12020/__init__.py | jordansilva/raspberry-f1-dashboard | 96446a348d036a75f4699bab4459eabec16705f8 | [
"Apache-2.0"
] | null | null | null | src/services/f12020/__init__.py | jordansilva/raspberry-f1-dashboard | 96446a348d036a75f4699bab4459eabec16705f8 | [
"Apache-2.0"
] | null | null | null | from .domain import * | 21 | 21 | 0.761905 | 3 | 21 | 5.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 21 | 1 | 21 | 21 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
46bc25338a0a4e3325cf6af4b58db58031b2cc76 | 20 | py | Python | addons14/company_dependent_attribute/models/__init__.py | odoochain/addons_oca | 55d456d798aebe16e49b4a6070765f206a8885ca | [
"MIT"
] | 1 | 2021-06-10T14:59:13.000Z | 2021-06-10T14:59:13.000Z | addons14/company_dependent_attribute/models/__init__.py | odoochain/addons_oca | 55d456d798aebe16e49b4a6070765f206a8885ca | [
"MIT"
] | null | null | null | addons14/company_dependent_attribute/models/__init__.py | odoochain/addons_oca | 55d456d798aebe16e49b4a6070765f206a8885ca | [
"MIT"
] | 1 | 2021-04-09T09:44:44.000Z | 2021-04-09T09:44:44.000Z | from . import field
| 10 | 19 | 0.75 | 3 | 20 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2 | 20 | 1 | 20 | 20 | 0.9375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2042ef5c22abd61e21de689873e76bba57ef5a02 | 157 | py | Python | mcsrvstats/exceptions/__init__.py | Darkflame72/mc-server-stats | 991020d3bac9aa453fd38546ef3eab914ce250fa | [
"MIT"
] | 1 | 2020-06-01T21:03:20.000Z | 2020-06-01T21:03:20.000Z | mcsrvstats/exceptions/__init__.py | Darkflame72/mc-server-stats | 991020d3bac9aa453fd38546ef3eab914ce250fa | [
"MIT"
] | 22 | 2020-08-26T05:12:46.000Z | 2021-12-20T15:20:45.000Z | mcsrvstats/exceptions/__init__.py | Obsidion-dev/mc-server-stats | 991020d3bac9aa453fd38546ef3eab914ce250fa | [
"MIT"
] | 2 | 2020-10-31T05:54:56.000Z | 2021-02-15T03:11:32.000Z | """Exceptions for mcsrvstats."""
from .exceptions import ApiError
from .exceptions import PlayerNotFoundError
__all__ = ["ApiError", "PlayerNotFoundError"]
| 26.166667 | 45 | 0.789809 | 14 | 157 | 8.571429 | 0.571429 | 0.233333 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.101911 | 157 | 5 | 46 | 31.4 | 0.851064 | 0.165605 | 0 | 0 | 0 | 0 | 0.216 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
20470f5893bfc1fbe15db89d3b58d59a2ed1e4a0 | 156 | py | Python | tests/test_import.py | bihealth/varfish-cli | e2b56ef8a158cc7fbe523cbd1c02f13cff8682e5 | [
"MIT"
] | 2 | 2020-09-24T08:01:03.000Z | 2022-03-23T15:49:13.000Z | tests/test_import.py | bihealth/varfish-cli | e2b56ef8a158cc7fbe523cbd1c02f13cff8682e5 | [
"MIT"
] | 9 | 2021-02-16T21:07:35.000Z | 2022-03-24T13:36:07.000Z | tests/test_import.py | bihealth/varfish-cli | e2b56ef8a158cc7fbe523cbd1c02f13cff8682e5 | [
"MIT"
] | 2 | 2022-03-23T15:06:19.000Z | 2022-03-23T15:49:17.000Z | """Test basic imports."""
import varfish_cli
from varfish_cli import __main__
def test_example():
    """Smoke test: the package exposes a truthy version string and its CLI
    entry module (``__main__``) is importable."""
    assert varfish_cli.__version__
    assert __main__
| 15.6 | 34 | 0.762821 | 20 | 156 | 5.15 | 0.6 | 0.291262 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 156 | 9 | 35 | 17.333333 | 0.792308 | 0.121795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.4 | 1 | 0.2 | true | 0 | 0.4 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
645ca09a44e961583fbe6abb3a05d9b1b694876b | 78 | py | Python | src/nmea2tf/__init__.py | naoki-mizuno/nmea2tf | b3e6b4cfbbbeca4026bf488501ed756ecd29ae59 | [
"MIT"
] | 1 | 2020-01-07T01:41:18.000Z | 2020-01-07T01:41:18.000Z | src/nmea2tf/__init__.py | naoki-mizuno/nmea2tf | b3e6b4cfbbbeca4026bf488501ed756ecd29ae59 | [
"MIT"
] | null | null | null | src/nmea2tf/__init__.py | naoki-mizuno/nmea2tf | b3e6b4cfbbbeca4026bf488501ed756ecd29ae59 | [
"MIT"
] | null | null | null | from GGAParser import *
from Converter import *
from MarkerPublisher import *
| 19.5 | 29 | 0.807692 | 9 | 78 | 7 | 0.555556 | 0.31746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 78 | 3 | 30 | 26 | 0.954545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
64864fa6bb6568c5fe4cfaacfbf82655f5ef32e4 | 22,593 | py | Python | tests/src/python/test_qgslayoutsnapper.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | null | null | null | tests/src/python/test_qgslayoutsnapper.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | null | null | null | tests/src/python/test_qgslayoutsnapper.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | 1 | 2021-12-25T08:40:30.000Z | 2021-12-25T08:40:30.000Z | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayoutSnapper.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '05/07/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import qgis # NOQA
from qgis.core import (QgsProject,
QgsLayout,
QgsLayoutSnapper,
QgsLayoutGridSettings,
QgsLayoutMeasurement,
QgsUnitTypes,
QgsLayoutPoint,
QgsLayoutItemPage,
QgsLayoutGuide,
QgsReadWriteContext,
QgsLayoutItemMap,
QgsLayoutSize)
from qgis.PyQt.QtCore import QPointF, Qt, QRectF
from qgis.PyQt.QtWidgets import QGraphicsLineItem
from qgis.PyQt.QtXml import QDomDocument
from qgis.testing import start_app, unittest
start_app()
class TestQgsLayoutSnapper(unittest.TestCase):
def testGettersSetters(self):
    """Every snapper toggle setter must round-trip through its getter,
    and the tolerance setter must store the given value."""
    project = QgsProject()
    layout = QgsLayout(project)
    snapper = QgsLayoutSnapper(layout)

    # each boolean toggle: off then on, read back after each write
    for write, read in ((snapper.setSnapToGrid, snapper.snapToGrid),
                        (snapper.setSnapToGuides, snapper.snapToGuides),
                        (snapper.setSnapToItems, snapper.snapToItems)):
        write(False)
        self.assertFalse(read())
        write(True)
        self.assertTrue(read())

    snapper.setSnapTolerance(15)
    self.assertEqual(snapper.snapTolerance(), 15)
def testSnapPointToGrid(self):
    """Test snapping a single point to the layout grid.

    Covers: both coordinates snapping, exactly one coordinate snapping,
    neither snapping, the snap-to-grid toggle, differing pixel scales
    (which shrink the effective tolerance), and an offset grid.

    Fix: the "grid disabled" case read the first returned flag into a
    misspelled local ``nappedX``; renamed to ``snappedX`` for consistency
    with every other assertion in this method (behavior unchanged).
    """
    p = QgsProject()
    l = QgsLayout(p)
    # need a page to snap to grid
    page = QgsLayoutItemPage(l)
    page.setPageSize('A4')
    l.pageCollection().addPage(page)
    s = QgsLayoutSnapper(l)

    l.gridSettings().setResolution(QgsLayoutMeasurement(5, QgsUnitTypes.LayoutMillimeters))
    s.setSnapToGrid(True)
    s.setSnapTolerance(1)

    # both coordinates within tolerance of a grid intersection
    point, snappedX, snappedY = s.snapPointToGrid(QPointF(1, 1), 1)
    self.assertTrue(snappedX)
    self.assertTrue(snappedY)
    self.assertEqual(point, QPointF(0, 0))

    point, snappedX, snappedY = s.snapPointToGrid(QPointF(9, 1), 1)
    self.assertTrue(snappedX)
    self.assertTrue(snappedY)
    self.assertEqual(point, QPointF(10, 0))

    point, snappedX, snappedY = s.snapPointToGrid(QPointF(1, 11), 1)
    self.assertTrue(snappedX)
    self.assertTrue(snappedY)
    self.assertEqual(point, QPointF(0, 10))

    # x outside tolerance, y snaps
    point, snappedX, snappedY = s.snapPointToGrid(QPointF(13, 11), 1)
    self.assertFalse(snappedX)
    self.assertTrue(snappedY)
    self.assertEqual(point, QPointF(13, 10))

    # x snaps, y outside tolerance
    point, snappedX, snappedY = s.snapPointToGrid(QPointF(11, 13), 1)
    self.assertTrue(snappedX)
    self.assertFalse(snappedY)
    self.assertEqual(point, QPointF(10, 13))

    # neither coordinate close enough
    point, snappedX, snappedY = s.snapPointToGrid(QPointF(13, 23), 1)
    self.assertFalse(snappedX)
    self.assertFalse(snappedY)
    self.assertEqual(point, QPointF(13, 23))

    # grid disabled
    s.setSnapToGrid(False)
    point, snappedX, snappedY = s.snapPointToGrid(QPointF(1, 1), 1)
    self.assertFalse(snappedX)
    self.assertFalse(snappedY)
    self.assertEqual(point, QPointF(1, 1))
    s.setSnapToGrid(True)

    # with different pixel scale
    point, snappedX, snappedY = s.snapPointToGrid(QPointF(0.5, 0.5), 1)
    self.assertTrue(snappedX)
    self.assertTrue(snappedY)
    self.assertEqual(point, QPointF(0, 0))
    point, snappedX, snappedY = s.snapPointToGrid(QPointF(0.5, 0.5), 3)
    self.assertFalse(snappedX)
    self.assertFalse(snappedY)
    self.assertEqual(point, QPointF(0.5, 0.5))

    # with offset grid
    l.gridSettings().setOffset(QgsLayoutPoint(2, 0))
    point, snappedX, snappedY = s.snapPointToGrid(QPointF(13, 23), 1)
    self.assertTrue(snappedX)
    self.assertFalse(snappedY)
    self.assertEqual(point, QPointF(12, 23))
def testSnapPointsToGrid(self):
    """Snapping a set of points to the grid returns a single common delta
    plus per-axis snap flags; the toggle, pixel scale and grid offset are
    all honoured."""
    project = QgsProject()
    layout = QgsLayout(project)
    # a page is required before grid snapping becomes active
    page = QgsLayoutItemPage(layout)
    page.setPageSize('A4')
    layout.pageCollection().addPage(page)
    snapper = QgsLayoutSnapper(layout)

    layout.gridSettings().setResolution(QgsLayoutMeasurement(5, QgsUnitTypes.LayoutMillimeters))
    snapper.setSnapToGrid(True)
    snapper.setSnapTolerance(1)

    # single point, both axes snap
    delta, hit_x, hit_y = snapper.snapPointsToGrid([QPointF(1, 0.5)], 1)
    self.assertTrue(hit_x)
    self.assertTrue(hit_y)
    self.assertEqual(delta, QPointF(-1, -0.5))

    # two points, both axes snap
    delta, hit_x, hit_y = snapper.snapPointsToGrid([QPointF(9, 2), QPointF(12, 6)], 1)
    self.assertTrue(hit_x)
    self.assertTrue(hit_y)
    self.assertEqual(delta, QPointF(1, -1))

    # only x within tolerance
    delta, hit_x, hit_y = snapper.snapPointsToGrid([QPointF(9, 2), QPointF(12, 7)], 1)
    self.assertTrue(hit_x)
    self.assertFalse(hit_y)
    self.assertEqual(delta, QPointF(1, 0))

    # only y within tolerance
    delta, hit_x, hit_y = snapper.snapPointsToGrid([QPointF(8, 2), QPointF(12, 6)], 1)
    self.assertFalse(hit_x)
    self.assertTrue(hit_y)
    self.assertEqual(delta, QPointF(0, -1))

    # grid disabled -> nothing snaps
    snapper.setSnapToGrid(False)
    delta, hit_x, hit_y = snapper.snapPointsToGrid([QPointF(1, 1)], 1)
    self.assertFalse(hit_x)
    self.assertFalse(hit_y)
    self.assertEqual(delta, QPointF(0, 0))
    snapper.setSnapToGrid(True)

    # with different pixel scale
    delta, hit_x, hit_y = snapper.snapPointsToGrid([QPointF(0.5, 0.5)], 1)
    self.assertTrue(hit_x)
    self.assertTrue(hit_y)
    self.assertEqual(delta, QPointF(-0.5, -0.5))
    delta, hit_x, hit_y = snapper.snapPointsToGrid([QPointF(0.5, 0.5)], 3)
    self.assertFalse(hit_x)
    self.assertFalse(hit_y)
    self.assertEqual(delta, QPointF(0, 0))

    # with offset grid
    layout.gridSettings().setOffset(QgsLayoutPoint(2, 0))
    delta, hit_x, hit_y = snapper.snapPointsToGrid([QPointF(13, 23)], 1)
    self.assertTrue(hit_x)
    self.assertFalse(hit_y)
    self.assertEqual(delta, QPointF(-1, 0))
def testSnapPointToGuides(self):
    """A coordinate snaps only to a guide of matching orientation, within
    tolerance, and only while guide snapping is enabled."""
    project = QgsProject()
    layout = QgsLayout(project)
    page = QgsLayoutItemPage(layout)
    page.setPageSize('A4')
    layout.pageCollection().addPage(page)
    snapper = QgsLayoutSnapper(layout)
    guides = layout.guides()

    snapper.setSnapToGuides(True)
    snapper.setSnapTolerance(1)

    # no guides yet -> no snap
    result, snapped = snapper.snapPointToGuides(0.5, Qt.Vertical, 1)
    self.assertFalse(snapped)

    # vertical guide at 1: 0.5 is inside the tolerance
    guides.addGuide(QgsLayoutGuide(Qt.Vertical, QgsLayoutMeasurement(1), page))
    result, snapped = snapper.snapPointToGuides(0.5, Qt.Vertical, 1)
    self.assertTrue(snapped)
    self.assertEqual(result, 1)

    # outside tolerance
    result, snapped = snapper.snapPointToGuides(5.5, Qt.Vertical, 1)
    self.assertFalse(snapped)

    # snapping off
    snapper.setSnapToGuides(False)
    result, snapped = snapper.snapPointToGuides(0.5, Qt.Vertical, 1)
    self.assertFalse(snapped)
    snapper.setSnapToGuides(True)

    # horizontal orientation is independent of vertical guides
    result, snapped = snapper.snapPointToGuides(0.5, Qt.Horizontal, 1)
    self.assertFalse(snapped)
    guides.addGuide(QgsLayoutGuide(Qt.Horizontal, QgsLayoutMeasurement(1), page))
    result, snapped = snapper.snapPointToGuides(0.5, Qt.Horizontal, 1)
    self.assertTrue(snapped)
    self.assertEqual(result, 1)

    # with different pixel scale
    result, snapped = snapper.snapPointToGuides(0.5, Qt.Horizontal, 3)
    self.assertFalse(snapped)
def testSnapPointsToGuides(self):
    """Snapping a set of coordinates to guides yields the smallest common
    correction per orientation; toggles and pixel scale are honoured."""
    project = QgsProject()
    layout = QgsLayout(project)
    page = QgsLayoutItemPage(layout)
    page.setPageSize('A4')
    layout.pageCollection().addPage(page)
    snapper = QgsLayoutSnapper(layout)
    guides = layout.guides()

    snapper.setSnapToGuides(True)
    snapper.setSnapTolerance(1)

    # no guides yet -> no snap
    delta, snapped = snapper.snapPointsToGuides([0.5], Qt.Vertical, 1)
    self.assertFalse(snapped)

    # vertical guide at 1
    guides.addGuide(QgsLayoutGuide(Qt.Vertical, QgsLayoutMeasurement(1), page))
    delta, snapped = snapper.snapPointsToGuides([0.7], Qt.Vertical, 1)
    self.assertTrue(snapped)
    self.assertAlmostEqual(delta, 0.3, 5)
    # with several candidates the smallest correction wins
    delta, snapped = snapper.snapPointsToGuides([0.7, 1.2], Qt.Vertical, 1)
    self.assertTrue(snapped)
    self.assertAlmostEqual(delta, -0.2, 5)

    # outside tolerance
    delta, snapped = snapper.snapPointsToGuides([5.5], Qt.Vertical, 1)
    self.assertFalse(snapped)

    # snapping off
    snapper.setSnapToGuides(False)
    delta, snapped = snapper.snapPointsToGuides([0.5], Qt.Vertical, 1)
    self.assertFalse(snapped)
    snapper.setSnapToGuides(True)

    # horizontal orientation is independent of vertical guides
    delta, snapped = snapper.snapPointsToGuides([0.5], Qt.Horizontal, 1)
    self.assertFalse(snapped)
    guides.addGuide(QgsLayoutGuide(Qt.Horizontal, QgsLayoutMeasurement(1), page))
    delta, snapped = snapper.snapPointsToGuides([0.7], Qt.Horizontal, 1)
    self.assertTrue(snapped)
    self.assertAlmostEqual(delta, 0.3, 5)
    delta, snapped = snapper.snapPointsToGuides([0.7, 1.2], Qt.Horizontal, 1)
    self.assertTrue(snapped)
    self.assertAlmostEqual(delta, -0.2, 5)
    delta, snapped = snapper.snapPointsToGuides([0.7, 0.9, 1.2], Qt.Horizontal, 1)
    self.assertTrue(snapped)
    self.assertAlmostEqual(delta, 0.1, 5)

    # with different pixel scale
    delta, snapped = snapper.snapPointsToGuides([0.5, 1.5], Qt.Horizontal, 3)
    self.assertFalse(snapped)
def testSnapPointToItems(self):
    """Test snapping a single coordinate to item edges and centers.

    A map item is placed at (4, 8) with size 18x12, so its horizontal
    edges/center are at x = 4, 13, 22 and its vertical ones at y = 8, 14, 20
    (established by the assertions below).  Also checks the ignore list, the
    tolerance, the snap-to-items toggle, the pixel scale, and that the
    optional QGraphicsLineItem indicator is shown on a snap and hidden
    otherwise.
    """
    p = QgsProject()
    l = QgsLayout(p)
    page = QgsLayoutItemPage(l)
    page.setPageSize('A4')
    #l.pageCollection().addPage(page)
    s = QgsLayoutSnapper(l)
    guides = l.guides()
    s.setSnapToItems(True)
    s.setSnapTolerance(1)

    # no items
    point, snapped = s.snapPointToItems(0.5, Qt.Horizontal, 1, [])
    self.assertFalse(snapped)
    line = QGraphicsLineItem()
    line.setVisible(True)
    # the indicator line must be hidden when nothing snapped
    point, snapped = s.snapPointToItems(0.5, Qt.Horizontal, 1, [], line)
    self.assertFalse(line.isVisible())

    # NOTE(review): a vertical guide is added but no guide-related assertion
    # follows — presumably to show guides don't interfere with item snapping
    guides.addGuide(QgsLayoutGuide(Qt.Vertical, QgsLayoutMeasurement(1), page))

    # add an item
    item1 = QgsLayoutItemMap(l)
    item1.attemptMove(QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
    item1.attemptResize(QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
    l.addItem(item1)

    # snap to the item's left edge (x = 4), from either side
    point, snapped = s.snapPointToItems(3.5, Qt.Horizontal, 1, [], line)
    self.assertTrue(snapped)
    self.assertEqual(point, 4)
    self.assertTrue(line.isVisible())
    point, snapped = s.snapPointToItems(4.5, Qt.Horizontal, 1, [])
    self.assertTrue(snapped)
    self.assertEqual(point, 4)

    # ignoring item
    point, snapped = s.snapPointToItems(4.5, Qt.Horizontal, 1, [item1])
    self.assertFalse(snapped)

    # outside tolerance
    point, snapped = s.snapPointToItems(5.5, Qt.Horizontal, 1, [], line)
    self.assertFalse(snapped)
    self.assertFalse(line.isVisible())

    # snap to center
    point, snapped = s.snapPointToItems(12.5, Qt.Horizontal, 1, [])
    self.assertTrue(snapped)
    self.assertEqual(point, 13)

    # snap to right
    point, snapped = s.snapPointToItems(22.5, Qt.Horizontal, 1, [])
    self.assertTrue(snapped)
    self.assertEqual(point, 22)

    # snap to top (y = 8), from either side
    point, snapped = s.snapPointToItems(7.5, Qt.Vertical, 1, [], line)
    self.assertTrue(snapped)
    self.assertEqual(point, 8)
    self.assertTrue(line.isVisible())
    point, snapped = s.snapPointToItems(8.5, Qt.Vertical, 1, [])
    self.assertTrue(snapped)
    self.assertEqual(point, 8)

    # outside tolerance
    point, snapped = s.snapPointToItems(5.5, Qt.Vertical, 1, [], line)
    self.assertFalse(snapped)
    self.assertFalse(line.isVisible())

    # snap to center
    point, snapped = s.snapPointToItems(13.5, Qt.Vertical, 1, [])
    self.assertTrue(snapped)
    self.assertEqual(point, 14)

    # snap to bottom
    point, snapped = s.snapPointToItems(20.5, Qt.Vertical, 1, [])
    self.assertTrue(snapped)
    self.assertEqual(point, 20)

    # snapping off: no snap, and the indicator is hidden again
    s.setSnapToItems(False)
    line.setVisible(True)
    point, snapped = s.snapPointToItems(20.5, Qt.Vertical, 1, [], line)
    self.assertFalse(snapped)
    self.assertFalse(line.isVisible())

    # with different pixel scale
    s.setSnapToItems(True)
    point, snapped = s.snapPointToItems(20.5, Qt.Vertical, 3, [])
    self.assertFalse(snapped)
def testSnapPointsToItems(self):
    """Test snapping sets of coordinates to item edges and centers.

    Like testSnapPointToItems, but the multi-point variant returns a common
    delta rather than an absolute position; with several candidates the
    smallest correction is chosen (established by the assertions below).
    Also checks the ignore list, tolerance, toggle, pixel scale and the
    visibility of the optional QGraphicsLineItem indicator.
    """
    p = QgsProject()
    l = QgsLayout(p)
    page = QgsLayoutItemPage(l)
    page.setPageSize('A4')
    #l.pageCollection().addPage(page)
    s = QgsLayoutSnapper(l)
    guides = l.guides()
    s.setSnapToItems(True)
    s.setSnapTolerance(1)

    # no items
    point, snapped = s.snapPointsToItems([0.5], Qt.Horizontal, 1, [])
    self.assertFalse(snapped)
    line = QGraphicsLineItem()
    line.setVisible(True)
    # the indicator line must be hidden when nothing snapped
    point, snapped = s.snapPointsToItems([0.5], Qt.Horizontal, 1, [], line)
    self.assertFalse(line.isVisible())

    # NOTE(review): a vertical guide is added but no guide-related assertion
    # follows — presumably to show guides don't interfere with item snapping
    guides.addGuide(QgsLayoutGuide(Qt.Vertical, QgsLayoutMeasurement(1), page))

    # add an item (x edges/center at 4, 13, 22; y at 8, 14, 20)
    item1 = QgsLayoutItemMap(l)
    item1.attemptMove(QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
    item1.attemptResize(QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
    l.addItem(item1)

    # snap toward the left edge (x = 4) from either side
    point, snapped = s.snapPointsToItems([3.5], Qt.Horizontal, 1, [], line)
    self.assertTrue(snapped)
    self.assertEqual(point, 0.5)
    self.assertTrue(line.isVisible())
    point, snapped = s.snapPointsToItems([4.5], Qt.Horizontal, 1, [])
    self.assertTrue(snapped)
    self.assertEqual(point, -0.5)
    point, snapped = s.snapPointsToItems([4.6, 4.5], Qt.Horizontal, 1, [])
    self.assertTrue(snapped)
    self.assertEqual(point, -0.5)
    # smallest correction wins among several candidates
    point, snapped = s.snapPointsToItems([4.6, 4.5, 3.7], Qt.Horizontal, 1, [])
    self.assertTrue(snapped)
    self.assertAlmostEqual(point, 0.3, 5)

    # ignoring item
    point, snapped = s.snapPointsToItems([4.5], Qt.Horizontal, 1, [item1])
    self.assertFalse(snapped)

    # outside tolerance
    point, snapped = s.snapPointsToItems([5.5], Qt.Horizontal, 1, [], line)
    self.assertFalse(snapped)
    self.assertFalse(line.isVisible())

    # snap to center
    point, snapped = s.snapPointsToItems([12.5], Qt.Horizontal, 1, [])
    self.assertTrue(snapped)
    self.assertEqual(point, 0.5)

    # snap to right
    point, snapped = s.snapPointsToItems([22.5], Qt.Horizontal, 1, [])
    self.assertTrue(snapped)
    self.assertEqual(point, -0.5)

    # snap to top (y = 8) from either side
    point, snapped = s.snapPointsToItems([7.5], Qt.Vertical, 1, [], line)
    self.assertTrue(snapped)
    self.assertEqual(point, 0.5)
    self.assertTrue(line.isVisible())
    point, snapped = s.snapPointsToItems([8.5], Qt.Vertical, 1, [])
    self.assertTrue(snapped)
    self.assertEqual(point, -0.5)

    # outside tolerance
    point, snapped = s.snapPointsToItems([5.5], Qt.Vertical, 1, [], line)
    self.assertFalse(snapped)
    self.assertFalse(line.isVisible())

    # snap to center
    point, snapped = s.snapPointsToItems([13.5], Qt.Vertical, 1, [])
    self.assertTrue(snapped)
    self.assertEqual(point, 0.5)

    # snap to bottom
    point, snapped = s.snapPointsToItems([20.5], Qt.Vertical, 1, [])
    self.assertTrue(snapped)
    self.assertEqual(point, -0.5)

    # snapping off: no snap, and the indicator is hidden again
    s.setSnapToItems(False)
    line.setVisible(True)
    point, snapped = s.snapPointsToItems([20.5], Qt.Vertical, 1, [], line)
    self.assertFalse(snapped)
    self.assertFalse(line.isVisible())

    # with different pixel scale
    s.setSnapToItems(True)
    point, snapped = s.snapPointsToItems([20.5], Qt.Vertical, 3, [])
    self.assertFalse(snapped)
def testSnapPoint(self):
p = QgsProject()
l = QgsLayout(p)
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
l.pageCollection().addPage(page)
s = QgsLayoutSnapper(l)
guides = l.guides()
# first test snapping to grid
l.gridSettings().setResolution(QgsLayoutMeasurement(5, QgsUnitTypes.LayoutMillimeters))
s.setSnapToGrid(True)
s.setSnapTolerance(1)
point, snapped = s.snapPoint(QPointF(1, 1), 1)
self.assertTrue(snapped)
self.assertEqual(point, QPointF(0, 0))
s.setSnapToItems(False)
s.setSnapToGrid(False)
point, snapped = s.snapPoint(QPointF(1, 1), 1)
self.assertFalse(snapped)
self.assertEqual(point, QPointF(1, 1))
# test that guide takes precedence
s.setSnapToGrid(True)
s.setSnapToGuides(True)
guides.addGuide(QgsLayoutGuide(Qt.Horizontal, QgsLayoutMeasurement(0.5), page))
point, snapped = s.snapPoint(QPointF(1, 1), 1)
self.assertTrue(snapped)
self.assertEqual(point, QPointF(0, 0.5))
# add an item
item1 = QgsLayoutItemMap(l)
item1.attemptMove(QgsLayoutPoint(121, 1.1, QgsUnitTypes.LayoutMillimeters))
l.addItem(item1)
# test that guide takes precedence over item
s.setSnapToGrid(True)
s.setSnapToGuides(True)
s.setSnapToItems(True)
point, snapped = s.snapPoint(QPointF(1, 1), 1)
self.assertTrue(snapped)
self.assertEqual(point, QPointF(0, 0.5))
# but items take precedence over grid
s.setSnapToGuides(False)
point, snapped = s.snapPoint(QPointF(1, 1), 1)
self.assertTrue(snapped)
self.assertEqual(point, QPointF(0, 1.1))
# ... unless item is ignored!
point, snapped = s.snapPoint(QPointF(1, 1), 1, None, None, [item1])
self.assertTrue(snapped)
self.assertEqual(point, QPointF(0, 0))
def testSnapRect(self):
p = QgsProject()
l = QgsLayout(p)
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
l.pageCollection().addPage(page)
s = QgsLayoutSnapper(l)
guides = l.guides()
# first test snapping to grid
l.gridSettings().setResolution(QgsLayoutMeasurement(5, QgsUnitTypes.LayoutMillimeters))
s.setSnapToItems(False)
s.setSnapToGrid(True)
s.setSnapTolerance(1)
rect, snapped = s.snapRect(QRectF(1, 1, 2, 1), 1)
self.assertTrue(snapped)
self.assertEqual(rect, QRectF(0, 0, 2, 1))
rect, snapped = s.snapRect(QRectF(1, 1, 3.5, 3.5), 1)
self.assertTrue(snapped)
self.assertEqual(rect, QRectF(1.5, 1.5, 3.5, 3.5))
s.setSnapToItems(False)
s.setSnapToGrid(False)
rect, snapped = s.snapRect(QRectF(1, 1, 3.5, 3.5), 1)
self.assertFalse(snapped)
self.assertEqual(rect, QRectF(1, 1, 3.5, 3.5))
# test that guide takes precedence
s.setSnapToGrid(True)
s.setSnapToGuides(True)
guides.addGuide(QgsLayoutGuide(Qt.Horizontal, QgsLayoutMeasurement(0.5), page))
rect, snapped = s.snapRect(QRectF(1, 1, 2, 3), 1)
self.assertTrue(snapped)
self.assertEqual(rect, QRectF(0.0, 0.5, 2.0, 3.0))
# add an item
item1 = QgsLayoutItemMap(l)
item1.attemptMove(QgsLayoutPoint(121, 1.1, QgsUnitTypes.LayoutMillimeters))
l.addItem(item1)
# test that guide takes precedence over item
s.setSnapToGrid(True)
s.setSnapToGuides(True)
s.setSnapToItems(True)
rect, snapped = s.snapRect(QRectF(1, 1, 2, 3), 1)
self.assertTrue(snapped)
self.assertEqual(rect, QRectF(0.0, 0.5, 2.0, 3.0))
# but items take precedence over grid
s.setSnapToGuides(False)
rect, snapped = s.snapRect(QRectF(1, 1, 2, 3), 1)
self.assertTrue(snapped)
self.assertEqual(rect, QRectF(0.0, 1.1, 2.0, 3.0))
# ... unless item is ignored!
rect, snapped = s.snapRect(QRectF(1, 1, 2, 3), 1, None, None, [item1])
self.assertTrue(snapped)
self.assertEqual(rect, QRectF(0.0, 0.0, 2.0, 3.0))
def testReadWriteXml(self):
p = QgsProject()
l = QgsLayout(p)
l.initializeDefaults()
snapper = l.snapper()
snapper.setSnapToGrid(True)
snapper.setSnapTolerance(1)
snapper.setSnapToGuides(True)
snapper.setSnapToItems(True)
doc = QDomDocument("testdoc")
elem = doc.createElement("test")
self.assertTrue(snapper.writeXml(elem, doc, QgsReadWriteContext()))
l2 = QgsLayout(p)
l2.initializeDefaults()
snapper2 = l2.snapper()
self.assertTrue(snapper2.readXml(elem.firstChildElement(), doc, QgsReadWriteContext()))
self.assertTrue(snapper2.snapToGrid())
self.assertEqual(snapper2.snapTolerance(), 1)
self.assertTrue(snapper2.snapToGuides())
self.assertTrue(snapper2.snapToItems())
snapper.setSnapToGrid(False)
snapper.setSnapTolerance(1)
snapper.setSnapToGuides(False)
snapper.setSnapToItems(False)
doc = QDomDocument("testdoc")
elem = doc.createElement("test")
self.assertTrue(snapper.writeXml(elem, doc, QgsReadWriteContext()))
self.assertTrue(snapper2.readXml(elem.firstChildElement(), doc, QgsReadWriteContext()))
self.assertFalse(snapper2.snapToGrid())
self.assertFalse(snapper2.snapToGuides())
self.assertFalse(snapper2.snapToItems())
if __name__ == '__main__':
unittest.main()
| 35.635647 | 95 | 0.622715 | 2,473 | 22,593 | 5.678528 | 0.091387 | 0.069786 | 0.049989 | 0.064089 | 0.853806 | 0.840632 | 0.799758 | 0.775404 | 0.745211 | 0.687674 | 0 | 0.036574 | 0.256938 | 22,593 | 633 | 96 | 35.691943 | 0.799917 | 0.066348 | 0 | 0.673961 | 0 | 0 | 0.006657 | 0.001902 | 0 | 0 | 0 | 0 | 0.398249 | 1 | 0.021882 | false | 0 | 0.013129 | 0 | 0.037199 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
64a4e83b993a419523a81b9eaaeb861ab617009b | 33 | py | Python | Sources/JYMT-ML-StructureFinder/xyz2mol/__init__.py | jerry0317/JYMoleculeTool-ML-Swift | 5974806791c46334ff75e175cf59c1f0f8f09db7 | [
"MIT"
] | null | null | null | Sources/JYMT-ML-StructureFinder/xyz2mol/__init__.py | jerry0317/JYMoleculeTool-ML-Swift | 5974806791c46334ff75e175cf59c1f0f8f09db7 | [
"MIT"
] | null | null | null | Sources/JYMT-ML-StructureFinder/xyz2mol/__init__.py | jerry0317/JYMoleculeTool-ML-Swift | 5974806791c46334ff75e175cf59c1f0f8f09db7 | [
"MIT"
] | null | null | null | from .oechem_xyz2smiles import *
| 16.5 | 32 | 0.818182 | 4 | 33 | 6.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 0.121212 | 33 | 1 | 33 | 33 | 0.862069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
64a7f725de7ddf2854cd530ca2e60f4ec7a1c70b | 114 | py | Python | src/main/python/runtime/trees/__init__.py | danilkolikov/fnn | 0f5ad2d9fdd1f03d3bf62255da14b05e4e0289e1 | [
"MIT"
] | 1 | 2019-01-06T04:42:28.000Z | 2019-01-06T04:42:28.000Z | src/main/python/runtime/trees/__init__.py | danilkolikov/fnn | 0f5ad2d9fdd1f03d3bf62255da14b05e4e0289e1 | [
"MIT"
] | null | null | null | src/main/python/runtime/trees/__init__.py | danilkolikov/fnn | 0f5ad2d9fdd1f03d3bf62255da14b05e4e0289e1 | [
"MIT"
] | null | null | null | from .tensor_tree import SumTree, ProdTree, empty_tree, stack, make_tuple
from .operator_tree import OperatorTree
| 38 | 73 | 0.842105 | 16 | 114 | 5.75 | 0.75 | 0.217391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 114 | 2 | 74 | 57 | 0.901961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
64b7dff811bf7ef3a2485e3ca06ce1244f45a760 | 4,334 | py | Python | test/test_recipient_lists.py | Ometria/python-sparkpost | b7eb2e30effc0d35cf25d7acaa4a5c304c25c9e5 | [
"Apache-2.0"
] | 100 | 2015-02-11T20:07:23.000Z | 2021-10-18T10:27:35.000Z | test/test_recipient_lists.py | Ometria/python-sparkpost | b7eb2e30effc0d35cf25d7acaa4a5c304c25c9e5 | [
"Apache-2.0"
] | 152 | 2015-02-09T01:34:02.000Z | 2021-11-08T17:32:30.000Z | test/test_recipient_lists.py | Ometria/python-sparkpost | b7eb2e30effc0d35cf25d7acaa4a5c304c25c9e5 | [
"Apache-2.0"
] | 79 | 2015-02-14T07:42:17.000Z | 2022-02-25T00:52:28.000Z | import pytest
import responses
from sparkpost import SparkPost
from sparkpost import RecipientLists
from sparkpost.exceptions import SparkPostAPIException
def test_translate_keys_with_id():
t = RecipientLists('uri', 'key')
results = t._translate_keys(id='test_id')
assert results['id'] == 'test_id'
@responses.activate
def test_success_create():
responses.add(
responses.POST,
'https://api.sparkpost.com/api/v1/recipient-lists',
status=200,
content_type='application/json',
body='{"results": "yay"}'
)
sp = SparkPost('fake-key')
results = sp.recipient_lists.create()
assert results == 'yay'
@responses.activate
def test_fail_create():
responses.add(
responses.POST,
'https://api.sparkpost.com/api/v1/recipient-lists',
status=500,
content_type='application/json',
body="""
{"errors": [{"message": "You failed", "description": "More Info"}]}
"""
)
with pytest.raises(SparkPostAPIException):
sp = SparkPost('fake-key')
sp.recipient_lists.create()
@responses.activate
def test_success_update():
responses.add(
responses.PUT,
'https://api.sparkpost.com/api/v1/recipient-lists/foobar',
status=200,
content_type='application/json',
body='{"results": "yay"}'
)
sp = SparkPost('fake-key')
results = sp.recipient_lists.update('foobar', name='foobar')
assert results == 'yay'
@responses.activate
def test_fail_update():
responses.add(
responses.PUT,
'https://api.sparkpost.com/api/v1/recipient-lists/foobar',
status=500,
content_type='application/json',
body="""
{"errors": [{"message": "You failed", "description": "More Info"}]}
"""
)
with pytest.raises(SparkPostAPIException):
sp = SparkPost('fake-key')
sp.recipient_lists.update('foobar', name='foobar')
@responses.activate
def test_success_delete():
responses.add(
responses.DELETE,
'https://api.sparkpost.com/api/v1/recipient-lists/foobar',
status=200,
content_type='application/json',
body='{"results": "yay"}'
)
sp = SparkPost('fake-key')
results = sp.recipient_lists.delete('foobar')
assert results == 'yay'
@responses.activate
def test_fail_delete():
responses.add(
responses.DELETE,
'https://api.sparkpost.com/api/v1/recipient-lists/foobar',
status=500,
content_type='application/json',
body="""
{"errors": [{"message": "You failed", "description": "More Info"}]}
"""
)
with pytest.raises(SparkPostAPIException):
sp = SparkPost('fake-key')
sp.recipient_lists.delete('foobar')
@responses.activate
def test_success_get():
responses.add(
responses.GET,
'https://api.sparkpost.com/api/v1/recipient-lists/foobar',
status=200,
content_type='application/json',
body='{"results": "yay"}'
)
sp = SparkPost('fake-key')
results = sp.recipient_lists.get('foobar')
assert results == "yay"
@responses.activate
def test_success_get_with_recipients():
responses.add(
responses.GET,
'https://api.sparkpost.com/api/v1/recipient-lists/foobar',
status=200,
content_type='application/json',
body='{"results": "yay"}'
)
sp = SparkPost('fake-key')
results = sp.recipient_lists.get('foobar', True)
assert results == "yay"
@responses.activate
def test_fail_get():
responses.add(
responses.GET,
'https://api.sparkpost.com/api/v1/recipient-lists/foobar',
status=404,
content_type='application/json',
body="""
{"errors": [{"message": "cant find", "description": "where you go"}]}
"""
)
with pytest.raises(SparkPostAPIException):
sp = SparkPost('fake-key')
sp.recipient_lists.get('foobar')
@responses.activate
def test_success_list():
responses.add(
responses.GET,
'https://api.sparkpost.com/api/v1/recipient-lists',
status=200,
content_type='application/json',
body='{"results": "yay"}'
)
sp = SparkPost('fake-key')
response = sp.recipient_lists.list()
assert response == "yay"
| 26.426829 | 77 | 0.619289 | 471 | 4,334 | 5.592357 | 0.138004 | 0.106302 | 0.07593 | 0.091116 | 0.868641 | 0.839408 | 0.795748 | 0.764996 | 0.716401 | 0.678436 | 0 | 0.011958 | 0.228196 | 4,334 | 163 | 78 | 26.588957 | 0.775486 | 0 | 0 | 0.693431 | 0 | 0 | 0.302953 | 0 | 0 | 0 | 0 | 0 | 0.051095 | 1 | 0.080292 | false | 0 | 0.036496 | 0 | 0.116788 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b3edc9918af68489b61ac6a4d66c84088023032b | 13,345 | py | Python | inkscape/.config/inkscape/extensions/circuitSymbols/drawSwitches.py | Elyk8/dotrice | 68924c7d1e3026ab94edd8c4f35c4ae30cf28f0c | [
"BSD-3-Clause"
] | null | null | null | inkscape/.config/inkscape/extensions/circuitSymbols/drawSwitches.py | Elyk8/dotrice | 68924c7d1e3026ab94edd8c4f35c4ae30cf28f0c | [
"BSD-3-Clause"
] | null | null | null | inkscape/.config/inkscape/extensions/circuitSymbols/drawSwitches.py | Elyk8/dotrice | 68924c7d1e3026ab94edd8c4f35c4ae30cf28f0c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
import inkscapeMadeEasy.inkscapeMadeEasy_Base as inkBase
import inkscapeMadeEasy.inkscapeMadeEasy_Draw as inkDraw
class switch(inkBase.inkscapeMadeEasy):
def add(self, vector, delta):
# nector does not need to be numpy array. delta will be converted to numpy array. Numpy can then deal with np.array + list
return vector + np.array(delta)
# ---------------------------------------------
def drawNPST(self, parent, position=[0, 0], value='S', label='Switch', angleDeg=0, isPushButton=False, nPoles=1, flagOpen=True,
drawCommuteArrow=False, commuteText='', flagVolt=True, voltName='v', flagCurr=True, currName='i', invertArrows=False,
convention='passive', wireExtraSize=0):
""" draws a switch with two terminals only
parent: parent object
position: position [x,y]
value: string with resistor value. (default 'S')
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
isPushButton: draws push-button (defalut: False)
nPoles: numer of poles (default: 1)
flagOpen: normaly open switch (default:True)
drawCommuteArrow: draw comuting arrow (default: False)
commuteText: string with comuting info
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
invertArrows: invert V/I arrow directions (default: False)
convention: passive/active sign convention. available types: 'passive' (default) , 'active'
wireExtraSize: additional length added to the terminals. If negative, the length will be reduced. default: 0)
"""
group = self.createGroup(parent, label)
elem = self.createGroup(group, label)
color = inkDraw.color.defined('red')
colorBlack = inkDraw.color.defined('black')
colorWhite = inkDraw.color.defined('white')
lineStyleSign = inkDraw.lineStyle.set(lineWidth=0.7, lineColor=colorBlack, fillColor=colorWhite)
[arrowStart, arrowEnd] = inkDraw.marker.createArrow1Marker(self, 'arrowSwitch', RenameMode=0, scale=0.25, strokeColor=color, fillColor=color)
inkDraw.line.relCoords(elem, [[-(15 + wireExtraSize), 0]], self.add(position, [-10, 0]))
inkDraw.line.relCoords(elem, [[15 + wireExtraSize, 0]], self.add(position, [10, 0]))
if isPushButton: # push-button
if flagOpen:
inkDraw.line.relCoords(elem, [[20, 0]], self.add(position, [-10, -5]))
inkDraw.line.relCoords(elem, [[0, -7]], self.add(position, [0, -5]))
else:
inkDraw.line.relCoords(elem, [[20, 0]], self.add(position, [-10, 2]))
inkDraw.line.relCoords(elem, [[0, -9]], self.add(position, [0, 2]))
else: # throw switch
if flagOpen:
inkDraw.line.relCoords(elem, [[20, -8]], self.add(position, [-10, 0]))
else:
inkDraw.line.relCoords(elem, [[20, -2]], self.add(position, [-10, 0]))
inkDraw.circle.centerRadius(elem, self.add(position, [10, 0]), 1.2, offset=[0, 0], lineStyle=lineStyleSign)
inkDraw.circle.centerRadius(elem, self.add(position, [-10, 0]), 1.2, offset=[0, 0], lineStyle=lineStyleSign)
if drawCommuteArrow:
if commuteText:
if isPushButton: # push-button
pos_text = self.add(position, [-13, -10 - self.textOffset])
else: # throw switch
pos_text = self.add(position, [-13, -5 - self.textOffset])
if inkDraw.useLatex:
commuteText = '$' + commuteText + '$'
inkDraw.text.latex(self, group, commuteText, pos_text, textColor=color, fontSize=self.fontSize * 0.8, refPoint='tc',
preambleFile=self.preambleFile)
if isPushButton: # push-button
lineStyle = inkDraw.lineStyle.set(lineWidth=0.6, lineColor=color, markerEnd=arrowEnd, strokeDashArray='1,1.5')
inkDraw.line.relCoords(group, [[0, 14]], self.add(position, [-5, -8]), lineStyle=lineStyle)
else: # throw switch
if flagOpen:
lineStyle = inkDraw.lineStyle.set(lineWidth=0.6, lineColor=color, markerEnd=arrowEnd, strokeDashArray='1,1.5')
else:
lineStyle = inkDraw.lineStyle.set(lineWidth=0.6, lineColor=color, markerStart=arrowStart, strokeDashArray='1,1.5')
inkDraw.arc.startEndRadius(group, [-4, -10], [3, 1], 10, self.add(position, [-1, 0]), lineStyle=lineStyle, flagRightOf=False)
if isPushButton: # push-button
pos_text = self.add(position, [3, -6 - self.textOffset])
else: # throw switch
if drawCommuteArrow:
pos_text = self.add(position, [3, -8 - self.textOffset])
else:
if nPoles > 1:
pos_text = self.add(position, [3, -6 - self.textOffset])
else:
pos_text = self.add(position, [-2, -6 - self.textOffset])
if value:
if inkDraw.useLatex:
value = '$' + value + '$'
inkDraw.text.latex(self, group, value, pos_text, fontSize=self.fontSize, refPoint='bl', preambleFile=self.preambleFile)
# multiple poles
if nPoles > 1:
spacingY = -25
for i in range(nPoles - 1):
self.copyElement(elem, group, distance=[0, spacingY * (i + 1)])
lineStyle = inkDraw.lineStyle.set(lineWidth=0.6, lineColor=colorBlack, strokeDashArray='1.5,1.5')
if isPushButton: # push-button
if flagOpen:
inkDraw.line.relCoords(elem, [[0, spacingY * (nPoles - 1)]], self.add(position, [0, -7]), lineStyle=lineStyle)
else:
inkDraw.line.relCoords(elem, [[0, spacingY * (nPoles - 1)]], self.add(position, [0, 2]), lineStyle=lineStyle)
else: # throw switch
if flagOpen:
inkDraw.line.relCoords(elem, [[0, spacingY * (nPoles - 1)]], self.add(position, [0, -4]), lineStyle=lineStyle)
else:
inkDraw.line.relCoords(elem, [[0, spacingY * (nPoles - 1)]], self.add(position, [0, -1]), lineStyle=lineStyle)
if angleDeg != 0:
self.rotateElement(group, position, angleDeg)
if flagVolt:
if convention == 'passive':
self.drawVoltArrow(group, self.add(position, [0, 7]), name=voltName, color=self.voltageColor, angleDeg=angleDeg,
invertArrows=not invertArrows)
if convention == 'active':
self.drawVoltArrow(group, self.add(position, [0, 7]), name=voltName, color=self.voltageColor, angleDeg=angleDeg,
invertArrows=invertArrows)
if flagCurr:
self.drawCurrArrow(group, self.add(position, [20 + wireExtraSize, -5]), name=currName, color=self.currentColor, angleDeg=angleDeg,
invertArrows=invertArrows)
return group
# ---------------------------------------------
def drawNPNT(self, parent, position=[0, 0], value='S', label='Switch', angleDeg=0, connection=1, nPoles=1, nThrows=1, drawCommuteArrow=False,
commuteOrientation='ccw', commuteText='', flagVolt=True, voltName='v', flagCurr=True, currName='i', invertArrows=False,
convention='passive', wireExtraSize=0):
""" draws a switch with two terminals only
parent: parent object
position: position [x,y]
value: string with resistor value. (default 'S')
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
connection: switch connection position (default:1) 0: OPEN
drawCommuteArrow: draw comuting arrow (default: False)
commuteOrientation: orientation of the commutation arrow, 'cw', 'ccw' (default)
commuteText: string with comuting info
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
invertArrows: invert V/I arrow directions (default: False)
convention: passive/active sign convention. available types: 'passive' (default) , 'active'
wireExtraSize: additional length added to the terminals. If negative, the length will be reduced. default: 0)
"""
group = self.createGroup(parent, label)
elem = self.createGroup(group, label)
color = inkDraw.color.defined('red')
colorBlack = inkDraw.color.defined('black')
colorWhite = inkDraw.color.defined('white')
lineStyleSign = inkDraw.lineStyle.set(lineWidth=0.7, lineColor=colorBlack, fillColor=colorWhite)
[arrowStart, arrowEnd] = inkDraw.marker.createArrow1Marker(self, 'arrowSwitch', RenameMode=0, scale=0.25, strokeColor=color, fillColor=color)
# pole
inkDraw.line.relCoords(elem, [[-(15 + wireExtraSize), 0]], self.add(position, [-10, 0]))
# throw
if nThrows < 3:
spacingThrowY = 20
else:
spacingThrowY = 10
Y_positions = [(i - (nThrows - 1) / float(2)) * spacingThrowY for i in range(nThrows)]
# connection position
conn = min(connection, nThrows)
if conn > 0:
inkDraw.line.relCoords(elem, [[15, Y_positions[conn - 1]]], self.add(position, [-10, 0]))
else:
inkDraw.line.relCoords(elem, [[7.5, Y_positions[0]]], self.add(position, [-10, 0]))
for i in range(nThrows):
inkDraw.line.relCoords(elem, [[-10, 0], [-(10 + wireExtraSize), 0]], self.add(position, [25 + wireExtraSize, Y_positions[i]]))
inkDraw.circle.centerRadius(elem, self.add(position, [5, Y_positions[i]]), 1.2, offset=[0, 0], lineStyle=lineStyleSign)
inkDraw.text.latex(self, elem, chr(ord('@') + i + 1), self.add(position, [5, Y_positions[i] - self.fontSize * 0.4]),
fontSize=self.fontSize * 0.5, refPoint='bc', preambleFile=self.preambleFile)
inkDraw.circle.centerRadius(elem, self.add(position, [-10, 0]), 1.2, offset=[0, 0], lineStyle=lineStyleSign)
# commute arrow
if drawCommuteArrow:
if commuteText:
pos_text = self.add(position, [-13, - 5 - self.textOffset])
if inkDraw.useLatex:
commuteText = '$' + commuteText + '$'
inkDraw.text.latex(self, group, commuteText, pos_text, textColor=color, fontSize=self.fontSize * 0.8, refPoint='tc',
preambleFile=self.preambleFile)
if commuteOrientation == 'cw':
lineStyle = inkDraw.lineStyle.set(lineWidth=0.6, lineColor=color, markerEnd=arrowEnd, strokeDashArray='1,1.5')
else:
lineStyle = inkDraw.lineStyle.set(lineWidth=0.6, lineColor=color, markerStart=arrowStart, strokeDashArray='1,1.5')
inkDraw.arc.startEndRadius(group, [-4, -9], [-4, 9], 10, self.add(position, [-1, 0]), lineStyle=lineStyle, flagRightOf=False)
# label
pos_text = self.add(position, [-10, Y_positions[0] - self.textOffset])
if value:
if inkDraw.useLatex:
value = '$' + value + '$'
inkDraw.text.latex(self, group, value, pos_text, fontSize=self.fontSize, refPoint='bl', preambleFile=self.preambleFile)
# multiple poles
if nPoles > 1:
spacingY = -(25 + spacingThrowY * (nThrows - 1))
for i in range(nPoles - 1):
self.copyElement(elem, group, distance=[0, spacingY * (i + 1)])
lineStyle = inkDraw.lineStyle.set(lineWidth=0.6, lineColor=colorBlack, strokeDashArray='1.5,1.5')
inkDraw.line.relCoords(elem, [[0, spacingY * (nPoles - 1)]], self.add(position, [-2.5, Y_positions[conn - 1] / 2]), lineStyle=lineStyle)
if angleDeg != 0:
self.rotateElement(group, position, angleDeg)
if flagVolt:
if convention == 'passive':
self.drawVoltArrow(group, self.add(position, [0, 6 + Y_positions[-1]]), name=voltName, color=self.voltageColor, angleDeg=angleDeg,
invertArrows=not invertArrows)
if convention == 'active':
self.drawVoltArrow(group, self.add(position, [0, 6 + Y_positions[-1]]), name=voltName, color=self.voltageColor, angleDeg=angleDeg,
invertArrows=invertArrows)
if flagCurr:
self.drawCurrArrowSimple(group, self.add(position, [-20 - wireExtraSize, 5]), name=currName, color=self.currentColor,
angleDeg=angleDeg + 180, invertArrows=invertArrows, size=10.0, invertTextSide=False, extraAngleText=0.0)
return group
| 53.810484 | 149 | 0.5997 | 1,464 | 13,345 | 5.450137 | 0.141393 | 0.034215 | 0.073317 | 0.051134 | 0.825793 | 0.799098 | 0.772653 | 0.754355 | 0.749467 | 0.732673 | 0 | 0.029626 | 0.269015 | 13,345 | 247 | 150 | 54.02834 | 0.788314 | 0.178569 | 0 | 0.690323 | 0 | 0 | 0.015873 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019355 | false | 0.025806 | 0.012903 | 0.006452 | 0.058065 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
37526db6e56f22322a5ac557067ac6f95f661ba1 | 10,252 | py | Python | tests/integration_tests.py | maurice-g/codit-api | ccab85976866e0b8e11f888beecc4aae361b0740 | [
"MIT"
] | 1 | 2022-03-06T01:57:34.000Z | 2022-03-06T01:57:34.000Z | tests/integration_tests.py | maurice-g/codit-api | ccab85976866e0b8e11f888beecc4aae361b0740 | [
"MIT"
] | null | null | null | tests/integration_tests.py | maurice-g/codit-api | ccab85976866e0b8e11f888beecc4aae361b0740 | [
"MIT"
] | null | null | null | import os, time
import pytest
from src.caplena_api_demo import CaplenaAPI, Question, Row, Answer, Project, Code
@pytest.fixture(scope="session")
def client():
caplena_api_key = os.environ.get('CAPLENA_API_KEY')
api = CaplenaAPI('en', caplena_api_key)
baseuri = os.environ.get('BASEURI')
if baseuri:
api.baseURI = baseuri
return api
def test_list_projects(client):
_ = client.listProjects()
def test_list_inheritable_projects(client):
_ = client.listInheritableProjects()
def test_update_question(client):
codebook = [Code(id=1, label='test', category='A')]
question_name = 'testq'
question = Question(name=question_name, codebook=codebook)
rows = [
Row(auxiliary_columns=[], answers=[Answer(text='test', question=question_name, reviewed=False)]),
Row(auxiliary_columns=[], answers=[Answer(text='test2', question=question_name, reviewed=False)])
]
proj1 = client.createProject('testproject', 'en', rows=rows, questions=[question], upload_async=False)
proj2 = client.createProject('testproject', 'en', rows=rows, questions=[question], upload_async=False)
q = proj2.questions[0]
q.inherits_from = proj1.questions[0].id
q_new = client.updateQuestion(q)
assert q_new.inherits_from == proj1.questions[0].id
def test_sync_workflow(client):
codebook = [
{
'id': 1,
'label': 'Code 1',
'category': 'CATEGORY 1'
}, {
'id': 20,
'label': 'Code 2',
'category': 'CATEGORY 2'
}
]
questions = [{'name': 'Question 1', 'codebook': codebook}]
# make sure to have at least 15 answers reviewed to enble predictions
rows_init = [
{
"answers":
[{
"text": "Answer-text 1",
"question": "Question 1"
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
# The values of the additional columns: Needs to be in same order as auxiliary_column_names of survey
},
{
"answers": [{
"text": "Answer-text 2",
"question": "Question 1"
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
},
{
"answers": [{
"text": "Answer-text 3",
"question": "Question 1"
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
}
]
num_projects_before = len(client.listProjects())
questions = [Question.from_json(q) for q in questions]
rows_init = [Row.from_json(row_init) for row_init in rows_init]
new_project = client.createProject(
name="My new project",
language="de",
auxiliary_column_names=['ID', 'some other column'],
translate=True,
questions=questions,
rows=rows_init,
upload_async=False,
request_training=True
)
assert isinstance(new_project, Project)
num_projects_after = len(client.listProjects())
assert num_projects_after == num_projects_before + 1
assert len(new_project.questions) == 1
question_id = new_project.questions[0].id
n_not_reviewed = len([row for row in rows_init if not row.answers[0].reviewed])
assert new_project.rows is not None
assert len(new_project.rows) == len(rows_init)
additional_rows = [
{
"answers": [{
"text": "Answer-text 1 new data",
"question": question_id,
"reviewed": False
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
# The values of the additional columns: Needs to be in same order as auxiliary_column_names of survey
},
{
"answers": [{
"text": "Answer-text 2 new data",
"question": question_id,
"reviewed": False
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
}
]
try:
new_answers = client.addRowsToProject(
new_project.id, [Row.from_json(r) for r in additional_rows],
upload_async=False,
request_training=True
)
answers = client.listAnswers(question_id, no_group=True)
assert len(rows_init) + len(additional_rows) == len(answers)
finally:
_ = client.deleteProject(new_project.id)
assert num_projects_before == len(client.listProjects())
def test_workflow_async(client):
codebook = [
{
'id': 1,
'label': 'Code 1',
'category': 'CATEGORY 1'
}, {
'id': 20,
'label': 'Code 2',
'category': 'CATEGORY 2'
}
]
questions = [{'name': 'Question 1', 'codebook': codebook}]
# make sure to have at least 15 answers reviewed to enble predictions
rows_init = [
{
"answers":
[{
"text": "Answer-text 1",
"question": "Question 1",
"codes": [1, 20],
"reviewed": True
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
# The values of the additional columns: Needs to be in same order as auxiliary_column_names of survey
},
{
"answers": [{
"text": "Answer-text 2",
"question": "Question 1",
"codes": [1],
"reviewed": True
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
},
{
"answers": [{
"text": "Answer-text 3",
"question": "Question 1",
"codes": [20],
"reviewed": True
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
},
{
"answers": [{
"text": "Answer-text 4",
"question": "Question 1",
"codes": [20],
"reviewed": True
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
},
{
"answers":
[{
"text": "Answer-text 5",
"question": "Question 1",
"codes": [1, 20],
"reviewed": True
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
},
{
"answers": [{
"text": "Answer-text 6",
"question": "Question 1",
"codes": [1],
"reviewed": True
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
},
{
"answers":
[{
"text": "Answer-text 7",
"question": "Question 1",
"codes": [1, 20],
"reviewed": True
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
},
{
"answers": [{
"text": "Answer-text 8",
"question": "Question 1",
"reviewed": False
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
},
{
"answers": [{
"text": "Answer-text 9",
"question": "Question 1",
"reviewed": False
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
}
]
num_projects_before = len(client.listProjects())
questions = [Question.from_json(q) for q in questions]
rows_init = [Row.from_json(row_init) for row_init in rows_init]
new_project = client.createProject(
name="My new project",
language="de",
auxiliary_column_names=['ID', 'some other column'],
translate=True,
questions=questions,
rows=rows_init,
upload_async=True,
request_training=True
)
assert isinstance(new_project, Project)
try:
# wait a bit since this is async upload
time.sleep(10)
num_projects_after = len(client.listProjects())
assert num_projects_after == num_projects_before + 1
assert len(new_project.questions) == 1
created_rows = client.listRows(new_project.id)
question_id = new_project.questions[0].id
n_not_reviewed_init = len([row for row in rows_init if not row.answers[0].reviewed])
n_not_reviewed_after_create = len([row for row in created_rows if not row.answers[0].reviewed])
assert n_not_reviewed_after_create == n_not_reviewed_init
additional_rows = [
{
"answers": [{
"text": "Answer-text 1 new data",
"question": question_id,
"reviewed": False
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
# The values of the additional columns: Needs to be in same order as auxiliary_column_names of survey
},
{
"answers": [{
"text": "Answer-text 2 new data",
"question": question_id,
"reviewed": False
}],
"auxiliary_columns": ["ID 1", "Some other column value 1"]
}
]
new_rows = client.addRowsToProject(
new_project.id, [Row.from_json(r) for r in additional_rows],
upload_async=True,
request_training=False
)
print(new_rows)
time.sleep(10)
answers = client.listAnswers(question_id, no_group=True)
assert len(rows_init) + len(additional_rows) == len(answers)
finally:
_ = client.deleteProject(new_project.id)
assert num_projects_before == len(client.listProjects())
| 34.635135 | 125 | 0.508974 | 1,031 | 10,252 | 4.900097 | 0.133851 | 0.011283 | 0.053444 | 0.066508 | 0.839272 | 0.809778 | 0.778306 | 0.77118 | 0.750594 | 0.750594 | 0 | 0.018389 | 0.374073 | 10,252 | 295 | 126 | 34.752542 | 0.768895 | 0.055892 | 0 | 0.64794 | 0 | 0 | 0.192017 | 0 | 0 | 0 | 0 | 0 | 0.052434 | 1 | 0.022472 | false | 0 | 0.011236 | 0 | 0.037453 | 0.003745 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
3773af5b92a111793719ea64067c87f71aee7000 | 199 | py | Python | hoa/forms.py | kokopelli314/hoa2 | 4923c28007ae81fa656fbe733b087c719051bd01 | [
"BSD-2-Clause"
] | null | null | null | hoa/forms.py | kokopelli314/hoa2 | 4923c28007ae81fa656fbe733b087c719051bd01 | [
"BSD-2-Clause"
] | 1 | 2021-06-02T00:43:16.000Z | 2021-06-02T00:43:16.000Z | hoa/forms.py | kokopelli314/hoa2 | 4923c28007ae81fa656fbe733b087c719051bd01 | [
"BSD-2-Clause"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Email, EqualTo, ValidationError
from hoa.models import User
| 28.428571 | 76 | 0.849246 | 24 | 199 | 7 | 0.708333 | 0.130952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.115578 | 199 | 6 | 77 | 33.166667 | 0.954545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.25 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
37cba0234db3b441f2ef025e8bba69b246e80cc1 | 9,144 | py | Python | oxe-api/test/resource/cron/test_run_database_compliance.py | CybersecurityLuxembourg/openxeco | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | [
"BSD-2-Clause"
] | null | null | null | oxe-api/test/resource/cron/test_run_database_compliance.py | CybersecurityLuxembourg/openxeco | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | [
"BSD-2-Clause"
] | null | null | null | oxe-api/test/resource/cron/test_run_database_compliance.py | CybersecurityLuxembourg/openxeco | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | [
"BSD-2-Clause"
] | null | null | null | from test.BaseCase import BaseCase
class TestRunDatabaseCompliance(BaseCase):
    """Integration tests for the /cron/run_database_compliance endpoint."""

    # Setting rows that switch on the company-related compliance checks.
    COMPANY_SETTINGS = (
        "HIGHLIGHT_ENTITIES_WITHOUT_CREATION_DATE",
        "HIGHLIGHT_ENTITIES_WITHOUT_WEBSITE",
        "HIGHLIGHT_ENTITIES_WITHOUT_IMAGE",
        "HIGHLIGHT_ENTITIES_WITHOUT_POSTAL_ADDRESS",
        "HIGHLIGHT_ENTITIES_WITH_POSTAL_ADDRESS_MISSING_GEOLOCATION",
        "HIGHLIGHT_ENTITIES_WITHOUT_PHONE_NUMBER",
        "HIGHLIGHT_ENTITIES_WITHOUT_EMAIL_ADDRESS",
    )

    # Setting rows that switch on the article-related compliance checks.
    ARTICLE_SETTINGS = (
        "HIGHLIGHT_ARTICLE_WITHOUT_TITLE",
        "HIGHLIGHT_ARTICLE_WITHOUT_HANDLE",
        "HIGHLIGHT_ARTICLE_WITHOUT_PUBLICATION_DATE",
        "HIGHLIGHT_ARTICLE_WITHOUT_START_DATE",
        "HIGHLIGHT_ARTICLE_WITHOUT_END_DATE",
        "HIGHLIGHT_ARTICLE_WITHOUT_CONTENT",
    )

    def _enable_settings(self, properties):
        # Insert one Setting row with value "TRUE" per compliance property.
        setting_table = self.db.tables["Setting"]
        for prop in properties:
            self.db.insert({"property": prop, "value": "TRUE"}, setting_table)

    def _run_compliance(self, token):
        # POST the cron endpoint and return (response, DataControl rows).
        response = self.application.post(
            '/cron/run_database_compliance',
            headers=self.get_standard_post_header(token))
        data_controls = self.db.get(self.db.tables["DataControl"])
        return response, data_controls

    def _assert_controls(self, data_controls, expected_values):
        # Each expected finding must appear in order, tagged DATABASE COMPLIANCE.
        # Indexing (not zip) keeps the original IndexError on missing rows.
        for index, expected in enumerate(expected_values):
            self.assertEqual(data_controls[index].category, 'DATABASE COMPLIANCE')
            self.assertEqual(data_controls[index].value, expected)

    @BaseCase.login
    @BaseCase.grant_access("/cron/run_database_compliance")
    def test_ok_company_without_data(self, token):
        self.db.insert({"id": 1, "name": "Company"}, self.db.tables["Company"])
        self._enable_settings(self.COMPANY_SETTINGS)

        response, data_controls = self._run_compliance(token)

        self.assertEqual(200, response.status_code)
        self._assert_controls(data_controls, [
            "Value 'creation_date' of <COMPANY:1> is empty",
            "Value 'website' of <COMPANY:1> is empty",
            "Value 'image' of <COMPANY:1> is empty",
            '<COMPANY:1> has no address registered',
            '<COMPANY:1> has no phone number registered as a contact',
            '<COMPANY:1> has no email address registered as a contact',
        ])

    @BaseCase.login
    @BaseCase.grant_access("/cron/run_database_compliance")
    def test_ok_company_with_all_contact_and_address_data(self, token):
        self._enable_settings(self.COMPANY_SETTINGS)
        self.db.insert({"id": 11, "thumbnail": bytes("", 'utf8'), "width": 12, "height": 12,
                        "creation_date": "2020-01-01"}, self.db.tables["Image"])
        self.db.insert({"id": 1, "name": "Company", "website": "", "image": 11,
                        "creation_date": "2020-01-01", "description": "desc"}, self.db.tables["Company"])
        self.db.insert({"id": 21, "company_id": 1, "type": "EMAIL ADDRESS", "representative": "ENTITY",
                        "value": "mail@example.com"}, self.db.tables["CompanyContact"])
        self.db.insert({"id": 22, "company_id": 1, "type": "PHONE NUMBER", "representative": "ENTITY",
                        "value": "045065561"}, self.db.tables["CompanyContact"])
        self.db.insert({"id": 31, "company_id": 1, "address_1": "", "city": "",
                        "country": "", "latitude": 1, "longitude": 1}, self.db.tables["Company_Address"])

        response, data_controls = self._run_compliance(token)

        # A fully-populated company must raise no compliance findings.
        self.assertEqual(200, response.status_code)
        self.assertEqual(len(data_controls), 0)

    @BaseCase.login
    @BaseCase.grant_access("/cron/run_database_compliance")
    def test_ok_news_with_no_data(self, token):
        self._enable_settings(self.ARTICLE_SETTINGS)
        self.db.insert({"id": 1, "title": "My article", "type": "NEWS"}, self.db.tables["Article"])

        response, data_controls = self._run_compliance(token)

        self.assertEqual(200, response.status_code)
        self.assertEqual(len(data_controls), 2)
        self._assert_controls(data_controls, [
            "Value 'handle' of article <ARTICLE:1> is empty",
            "<ARTICLE:1> has no main version and no link",
        ])

    @BaseCase.login
    @BaseCase.grant_access("/cron/run_database_compliance")
    def test_ok_news_with_empty_main_version(self, token):
        self._enable_settings(self.ARTICLE_SETTINGS)
        self.db.insert({"id": 1, "title": "My article", "type": "NEWS", "handle": "my_article",
                        "publication_date": "2020-01-01"}, self.db.tables["Article"])
        self.db.insert({"id": 1, "article_id": 1, "name": "Version 0", "is_main": 1}, self.db.tables["ArticleVersion"])

        response, data_controls = self._run_compliance(token)

        self.assertEqual(200, response.status_code)
        self.assertEqual(len(data_controls), 1)
        self._assert_controls(data_controls, [
            "<ARTICLE:1> has an empty main version and no link",
        ])

    @BaseCase.login
    @BaseCase.grant_access("/cron/run_database_compliance")
    def test_ok_news_with_all_data(self, token):
        self._enable_settings(self.ARTICLE_SETTINGS)
        self.db.insert({"id": 1, "title": "My article", "type": "NEWS", "handle": "my_article",
                        "publication_date": "2020-01-01"}, self.db.tables["Article"])
        self.db.insert({"id": 1, "article_id": 1, "name": "Version 0", "is_main": 1}, self.db.tables["ArticleVersion"])
        self.db.insert({"id": 1, "article_version_id": 1, "position": 1, "type": "TITLE1", "content": "title 1"},
                       self.db.tables["ArticleBox"])

        response, data_controls = self._run_compliance(token)

        # An article with a handle, publication date and content raises nothing.
        self.assertEqual(200, response.status_code)
        self.assertEqual(len(data_controls), 0)
| 63.062069 | 119 | 0.653871 | 1,104 | 9,144 | 5.201087 | 0.10779 | 0.07419 | 0.091954 | 0.111459 | 0.886451 | 0.861372 | 0.857541 | 0.798328 | 0.784396 | 0.767503 | 0 | 0.016849 | 0.182196 | 9,144 | 144 | 120 | 63.5 | 0.751003 | 0 | 0 | 0.689655 | 0 | 0 | 0.3715 | 0.162073 | 0 | 0 | 0 | 0 | 0.232759 | 1 | 0.043103 | false | 0 | 0.008621 | 0 | 0.060345 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
8066e6022b3a6a7838fad6b070a55ef6df66c77e | 340 | py | Python | OPTUS/Basics/OptusTest.py | dehimmi/OptusPOC | 16736af9a5d21afb7f3cbe7a2e0d55f0a12c174b | [
"bzip2-1.0.6"
] | null | null | null | OPTUS/Basics/OptusTest.py | dehimmi/OptusPOC | 16736af9a5d21afb7f3cbe7a2e0d55f0a12c174b | [
"bzip2-1.0.6"
] | null | null | null | OPTUS/Basics/OptusTest.py | dehimmi/OptusPOC | 16736af9a5d21afb7f3cbe7a2e0d55f0a12c174b | [
"bzip2-1.0.6"
] | null | null | null | import pytest
def test_pass():
    """Trivial sanity test; must hold for the suite (and reporting POC) to pass.

    Was ``assert 1+1 == 3``, which always failed despite the test's name.
    """
    assert 1 + 1 == 2
#client.write_points(result, database='pythondb', time_precision='ms')
# python -m pytest test_example.py --pytest-influxdb --influxdb_host=localhost --influxdb_name=pythondb
# python -m pytest test_example.py --pytest-influxdb --influxdb_host=52.63.126.230 --influxdb_name=pythondb
| 26.153846 | 107 | 0.758824 | 49 | 340 | 5.081633 | 0.591837 | 0.056225 | 0.104418 | 0.136546 | 0.417671 | 0.417671 | 0.417671 | 0.417671 | 0.417671 | 0.417671 | 0 | 0.042904 | 0.108824 | 340 | 12 | 108 | 28.333333 | 0.778878 | 0.814706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.333333 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
80768fb116752851f78498170f9d037461438e5e | 8,821 | py | Python | kooplex-proxy/proxy/models.py | enasequence/covid-workflow-manager | 185899b236f925dadb45ea5f224713202c4e7b00 | [
"Apache-2.0"
] | 5 | 2020-06-29T19:50:36.000Z | 2022-01-31T09:16:29.000Z | kooplex-proxy/proxy/models.py | enasequence/covid-workflow-manager | 185899b236f925dadb45ea5f224713202c4e7b00 | [
"Apache-2.0"
] | 10 | 2020-06-29T19:48:57.000Z | 2022-02-13T11:54:06.000Z | kooplex-proxy/proxy/models.py | enasequence/covid-workflow-manager | 185899b236f925dadb45ea5f224713202c4e7b00 | [
"Apache-2.0"
] | 2 | 2020-06-25T13:40:52.000Z | 2021-02-03T20:23:09.000Z | from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, TEXT, TIMESTAMP, \
VARCHAR, INTEGER, REAL, BOOLEAN, DATE
from database import Base
class CovidCountryWeekly(Base):
    """ORM mapping for the ``ecdc_covid_country_weekly`` table (weekly ECDC
    COVID-19 case/death counts per country).

    NOTE(review): every column is flagged ``primary_key=True`` — presumably
    because the underlying table declares no primary key and SQLAlchemy
    requires one to map it; confirm against the database schema. The same
    pattern is used by every model in this module.
    """
    __tablename__ = "ecdc_covid_country_weekly"

    iso_a3 = Column(TEXT, primary_key=True)
    iso_a2 = Column(TEXT, primary_key=True)
    country_name = Column(TEXT, primary_key=True)
    country_name_local = Column(TEXT, primary_key=True)
    date_year = Column(DOUBLE_PRECISION, primary_key=True)
    date_week = Column(DOUBLE_PRECISION, primary_key=True)
    ecdc_covid_country_weekly_cases = Column(DOUBLE_PRECISION, primary_key=True)
    ecdc_covid_country_weekly_deaths = Column(DOUBLE_PRECISION, primary_key=True)
class UniqueVCFAppend(Base):
    """ORM mapping for the ``unique_vcf_append`` table: one bookkeeping row
    per ENA run/snapshot recording a VCF append and its integrity flag.
    (All columns are ``primary_key=True`` — apparently a composite-key
    workaround for a table without a declared PK; TODO confirm.)
    """
    __tablename__ = "unique_vcf_append"

    insertion_ts = Column(TIMESTAMP, primary_key=True)
    ena_run = Column(VARCHAR, primary_key=True)
    snapshot = Column(VARCHAR, primary_key=True)
    integrity = Column(INTEGER, primary_key=True)
class VCFAll(Base):
    """ORM mapping for the ``vcf_all`` table: per-run variant calls with
    SnpEff-style annotation fields (ANN/LOF/NMD columns).
    (All columns are ``primary_key=True`` — apparently a composite-key
    workaround for a table without a declared PK; TODO confirm.)
    """
    __tablename__ = "vcf_all"

    ena_run = Column(VARCHAR, primary_key=True)
    chrom = Column(TEXT, primary_key=True)
    pos = Column(INTEGER, primary_key=True)
    ref = Column(TEXT, primary_key=True)
    alt = Column(TEXT, primary_key=True)
    qual = Column(INTEGER, primary_key=True)
    filter = Column(TEXT, primary_key=True)
    dp = Column(INTEGER, primary_key=True)
    af = Column(REAL, primary_key=True)
    sb = Column(INTEGER, primary_key=True)
    count_ref_forward_base = Column(INTEGER, primary_key=True)
    count_ref_reverse_base = Column(INTEGER, primary_key=True)
    count_alt_forward_base = Column(INTEGER, primary_key=True)
    count_alt_reverse_base = Column(INTEGER, primary_key=True)
    hrun = Column(INTEGER, primary_key=True)
    indel = Column(BOOLEAN, primary_key=True)
    lof = Column(TEXT, primary_key=True)
    nmd = Column(TEXT, primary_key=True)
    ann_num = Column(INTEGER, primary_key=True)
    annotation = Column(TEXT, primary_key=True)
    annotation_impact = Column(TEXT, primary_key=True)
    gene_name = Column(TEXT, primary_key=True)
    gene_id = Column(TEXT, primary_key=True)
    feature_type = Column(TEXT, primary_key=True)
    feature_id = Column(TEXT, primary_key=True)
    transcript_biotype = Column(TEXT, primary_key=True)
    rank_ = Column(TEXT, primary_key=True)
    hgvs_c = Column(TEXT, primary_key=True)
    hgvs_p = Column(TEXT, primary_key=True)
    cdna_pos__cdna_length = Column(TEXT, primary_key=True)
    cds_pos__cds_length = Column(TEXT, primary_key=True)
    aa_pos__aa_length = Column(TEXT, primary_key=True)
    distance = Column(INTEGER, primary_key=True)
    errors_warnings_info = Column(TEXT, primary_key=True)
class Cov(Base):
    """ORM mapping for the ``cov`` table: per-position read coverage for an
    ENA run. (All columns ``primary_key=True`` — see module-wide pattern.)
    """
    __tablename__ = "cov"

    ena_run = Column(VARCHAR, primary_key=True)
    pos = Column(INTEGER, primary_key=True)
    coverage = Column(INTEGER, primary_key=True)
class Meta(Base):
    """ORM mapping for the ``meta`` table: ENA sample/run metadata
    (accessions, host, instrument, library and collection-date fields),
    plus cleaned/derived columns (``clean_*``, ``date_isoweek``/``date_isoyear``).
    (All columns ``primary_key=True`` — see module-wide pattern.)
    """
    __tablename__ = "meta"

    ena_run = Column(VARCHAR, primary_key=True)
    collection_date = Column(DATE, primary_key=True)
    clean_country = Column(TEXT, primary_key=True)
    clean_host = Column(TEXT, primary_key=True)
    accession = Column(TEXT, primary_key=True)
    sample_accession = Column(TEXT, primary_key=True)
    experiment_accession = Column(TEXT, primary_key=True)
    study_accession = Column(TEXT, primary_key=True)
    description = Column(TEXT, primary_key=True)
    country = Column(TEXT, primary_key=True)
    first_created = Column(DATE, primary_key=True)
    first_public = Column(DATE, primary_key=True)
    host = Column(TEXT, primary_key=True)
    host_sex = Column(TEXT, primary_key=True)
    host_tax_id = Column(INTEGER, primary_key=True)
    host_body_site = Column(TEXT, primary_key=True)
    bio_material = Column(TEXT, primary_key=True)
    culture_collection = Column(TEXT, primary_key=True)
    instrument_model = Column(TEXT, primary_key=True)
    instrument_platform = Column(TEXT, primary_key=True)
    library_layout = Column(TEXT, primary_key=True)
    library_name = Column(TEXT, primary_key=True)
    library_selection = Column(TEXT, primary_key=True)
    library_source = Column(TEXT, primary_key=True)
    library_strategy = Column(TEXT, primary_key=True)
    sequencing_method = Column(TEXT, primary_key=True)
    isolate = Column(TEXT, primary_key=True)
    strain = Column(TEXT, primary_key=True)
    base_count = Column(DOUBLE_PRECISION, primary_key=True)
    collected_by = Column(TEXT, primary_key=True)
    broker_name = Column(TEXT, primary_key=True)
    center_name = Column(TEXT, primary_key=True)
    sample_capture_status = Column(TEXT, primary_key=True)
    fastq_ftp = Column(TEXT, primary_key=True)
    collection_date_submitted = Column(TEXT, primary_key=True)
    checklist = Column(TEXT, primary_key=True)
    clean_collection_date = Column(DATE, primary_key=True)
    date_isoweek = Column(INTEGER, primary_key=True)
    date_isoyear = Column(INTEGER, primary_key=True)
class UniqueCovAppend(Base):
    """ORM mapping for the ``unique_cov_append`` table: bookkeeping rows for
    coverage-data appends, mirroring ``UniqueVCFAppend``.
    (All columns ``primary_key=True`` — see module-wide pattern.)
    """
    __tablename__ = "unique_cov_append"

    insertion_ts = Column(TIMESTAMP, primary_key=True)
    ena_run = Column(VARCHAR, primary_key=True)
    snapshot = Column(VARCHAR, primary_key=True)
    integrity = Column(INTEGER, primary_key=True)
class LineageDef(Base):
    """ORM mapping for the ``lineage_def`` table: defining mutations per
    variant lineage (Pango/Nextstrain names plus per-mutation details).
    (All columns ``primary_key=True`` — see module-wide pattern.)
    """
    __tablename__ = "lineage_def"

    variant_id = Column(TEXT, primary_key=True)
    pango = Column(TEXT, primary_key=True)
    nextstrain = Column(TEXT, primary_key=True)
    ref_pos_alt = Column(TEXT, primary_key=True)
    codon_change = Column(TEXT, primary_key=True)
    gene = Column(TEXT, primary_key=True)
    pos = Column(DOUBLE_PRECISION, primary_key=True)
    predicted_effect = Column(TEXT, primary_key=True)
    protein = Column(TEXT, primary_key=True)
    protein_codon_position = Column(DOUBLE_PRECISION, primary_key=True)
    ref = Column(TEXT, primary_key=True)
    type = Column(TEXT, primary_key=True)
    alt = Column(TEXT, primary_key=True)
    amino_acid_change = Column(TEXT, primary_key=True)
    description = Column(TEXT, primary_key=True)
    snp_codon_position = Column(TEXT, primary_key=True)
class Operation(Base):
    """ORM mapping for the ``operation`` table: pipeline stage/exit-code
    events with timestamps. (All columns ``primary_key=True`` — see
    module-wide pattern.)
    """
    __tablename__ = "operation"

    event_ts = Column(TIMESTAMP, primary_key=True)
    last_stage = Column(INTEGER, primary_key=True)
    last_exit_code = Column(INTEGER, primary_key=True)
    stage = Column(INTEGER, primary_key=True)
    exit_code = Column(INTEGER, primary_key=True)
    extra_info = Column(TEXT, primary_key=True)
class UniqueCov(Base):
    """ORM mapping for the ``unique_cov`` table: deduplicated coverage
    bookkeeping rows (same shape as ``UniqueCovAppend``).
    (All columns ``primary_key=True`` — see module-wide pattern.)
    """
    __tablename__ = "unique_cov"

    insertion_ts = Column(TIMESTAMP, primary_key=True)
    ena_run = Column(VARCHAR, primary_key=True)
    snapshot = Column(VARCHAR, primary_key=True)
    integrity = Column(INTEGER, primary_key=True)
class UniqueVCF(Base):
    """ORM mapping for the ``unique_vcf`` table: deduplicated VCF bookkeeping
    rows (same shape as ``UniqueVCFAppend``).
    (All columns ``primary_key=True`` — see module-wide pattern.)
    """
    __tablename__ = "unique_vcf"

    insertion_ts = Column(TIMESTAMP, primary_key=True)
    ena_run = Column(VARCHAR, primary_key=True)
    snapshot = Column(VARCHAR, primary_key=True)
    integrity = Column(INTEGER, primary_key=True)
class VCFAllAppend(Base):
    """ORM mapping for the ``vcf_all_append`` table.

    Same column set as ``VCFAll`` except ``ena_run`` is TEXT here rather
    than VARCHAR. (All columns ``primary_key=True`` — see module-wide
    pattern.)
    """
    __tablename__ = "vcf_all_append"

    ena_run = Column(TEXT, primary_key=True)
    chrom = Column(TEXT, primary_key=True)
    pos = Column(INTEGER, primary_key=True)
    ref = Column(TEXT, primary_key=True)
    alt = Column(TEXT, primary_key=True)
    qual = Column(INTEGER, primary_key=True)
    filter = Column(TEXT, primary_key=True)
    dp = Column(INTEGER, primary_key=True)
    af = Column(REAL, primary_key=True)
    sb = Column(INTEGER, primary_key=True)
    count_ref_forward_base = Column(INTEGER, primary_key=True)
    count_ref_reverse_base = Column(INTEGER, primary_key=True)
    count_alt_forward_base = Column(INTEGER, primary_key=True)
    count_alt_reverse_base = Column(INTEGER, primary_key=True)
    hrun = Column(INTEGER, primary_key=True)
    indel = Column(BOOLEAN, primary_key=True)
    lof = Column(TEXT, primary_key=True)
    nmd = Column(TEXT, primary_key=True)
    ann_num = Column(INTEGER, primary_key=True)
    annotation = Column(TEXT, primary_key=True)
    annotation_impact = Column(TEXT, primary_key=True)
    gene_name = Column(TEXT, primary_key=True)
    gene_id = Column(TEXT, primary_key=True)
    feature_type = Column(TEXT, primary_key=True)
    feature_id = Column(TEXT, primary_key=True)
    transcript_biotype = Column(TEXT, primary_key=True)
    rank_ = Column(TEXT, primary_key=True)
    hgvs_c = Column(TEXT, primary_key=True)
    hgvs_p = Column(TEXT, primary_key=True)
    cdna_pos__cdna_length = Column(TEXT, primary_key=True)
    cds_pos__cds_length = Column(TEXT, primary_key=True)
    aa_pos__aa_length = Column(TEXT, primary_key=True)
    distance = Column(INTEGER, primary_key=True)
    errors_warnings_info = Column(TEXT, primary_key=True)
| 42.613527 | 79 | 0.733817 | 1,181 | 8,821 | 5.165114 | 0.134632 | 0.255738 | 0.358033 | 0.295082 | 0.84377 | 0.768033 | 0.589508 | 0.53082 | 0.53082 | 0.516885 | 0 | 0.000273 | 0.168688 | 8,821 | 206 | 80 | 42.820388 | 0.831583 | 0 | 0 | 0.5 | 0 | 0 | 0.014397 | 0.002834 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.016304 | 0 | 0.983696 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
80975bf10593e83235a256eff4cbffc2d218152a | 1,467 | py | Python | segme/model/cascade_psp/loss.py | shkarupa-alex/segme | d5bc0043f9e709c8ccaf8949d662bc6fd6144006 | [
"MIT"
] | 2 | 2021-05-25T18:53:00.000Z | 2021-05-26T12:11:41.000Z | segme/model/cascade_psp/loss.py | shkarupa-alex/segme | d5bc0043f9e709c8ccaf8949d662bc6fd6144006 | [
"MIT"
] | null | null | null | segme/model/cascade_psp/loss.py | shkarupa-alex/segme | d5bc0043f9e709c8ccaf8949d662bc6fd6144006 | [
"MIT"
] | 2 | 2021-11-21T02:39:37.000Z | 2021-12-08T07:26:56.000Z | from keras.losses import MeanAbsoluteError, MeanSquaredError, BinaryCrossentropy
from ...loss import SobelEdgeLoss, WeightedLossFunctionWrapper
def _loss_224(y_true, y_pred, sample_weight=None):
    """Sum of L1, L2 and 5x Sobel edge losses (224px output, per the name)."""
    l1_term = MeanAbsoluteError()(y_true, y_pred, sample_weight)
    l2_term = MeanSquaredError()(y_true, y_pred, sample_weight)
    edge_term = SobelEdgeLoss()(y_true, y_pred, sample_weight)
    return l1_term + l2_term + 5. * edge_term
def _loss_28(y_true, y_pred, sample_weight=None):
    """Plain binary cross-entropy (28px output, per the name)."""
    bce = BinaryCrossentropy()
    return bce(y_true, y_pred, sample_weight)
def _loss_56(y_true, y_pred, sample_weight=None):
    """Weighted BCE + L1 + L2 mix (56px output, per the name)."""
    bce_term = BinaryCrossentropy()(y_true, y_pred, sample_weight)
    l1_term = MeanAbsoluteError()(y_true, y_pred, sample_weight)
    l2_term = MeanSquaredError()(y_true, y_pred, sample_weight)
    return .5 * bce_term + .25 * l1_term + .25 * l2_term
def _loss_28_2(y_true, y_pred, sample_weight=None):
    """Plain binary cross-entropy; a distinct function from `_loss_28`,
    presumably so each model output gets its own named loss — confirm."""
    bce = BinaryCrossentropy()
    return bce(y_true, y_pred, sample_weight)
def _loss_28_3(y_true, y_pred, sample_weight=None):
    """Plain binary cross-entropy; a distinct function from `_loss_28`,
    presumably so each model output gets its own named loss — confirm."""
    bce = BinaryCrossentropy()
    return bce(y_true, y_pred, sample_weight)
def _loss_56_2(y_true, y_pred, sample_weight=None):
    """Weighted BCE + L1 + L2 mix; same formula as `_loss_56`, kept as a
    separate function presumably for per-output loss naming — confirm."""
    bce_term = BinaryCrossentropy()(y_true, y_pred, sample_weight)
    l1_term = MeanAbsoluteError()(y_true, y_pred, sample_weight)
    l2_term = MeanSquaredError()(y_true, y_pred, sample_weight)
    return .5 * bce_term + .25 * l1_term + .25 * l2_term
def total_losses():
    """Return the per-output loss functions.

    The order presumably matches the model's output order — confirm
    against the model definition.
    """
    return [
        _loss_224,
        _loss_56_2,
        _loss_28_3,
        _loss_56,
        _loss_28_2,
        _loss_28,
    ]
def cascade_psp_losses():
    """Return every per-output loss wrapped in WeightedLossFunctionWrapper."""
    return list(map(WeightedLossFunctionWrapper, total_losses()))
| 35.780488 | 80 | 0.731425 | 203 | 1,467 | 4.852217 | 0.152709 | 0.091371 | 0.109645 | 0.182741 | 0.715736 | 0.715736 | 0.715736 | 0.637563 | 0.605076 | 0.563452 | 0 | 0.034874 | 0.159509 | 1,467 | 40 | 81 | 36.675 | 0.76399 | 0 | 0 | 0.375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.083333 | 0.333333 | 0.75 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
80caeb658d081ea0b3726b9bab9210e4cf442b67 | 556 | py | Python | unb_cli/venv.py | jacktrades/unb-cli | 51cb451fc66352ca85ded03bfbc3bf01913f33ca | [
"MIT"
] | null | null | null | unb_cli/venv.py | jacktrades/unb-cli | 51cb451fc66352ca85ded03bfbc3bf01913f33ca | [
"MIT"
] | null | null | null | unb_cli/venv.py | jacktrades/unb-cli | 51cb451fc66352ca85ded03bfbc3bf01913f33ca | [
"MIT"
] | null | null | null | """Utilities for working with virtual environments."""
import sys
def in_venv():
  """Return True when running inside a virtual environment.

  Detects both flavors:
  - virtualenv (github.com/pypa/virtualenv) sets ``sys.real_prefix``.
  - pyvenv / stdlib ``venv`` (PEP 405, Python 3.3+) instead sets
    ``sys.base_prefix``, which always exists and differs from
    ``sys.prefix`` only inside such an environment.

  The original checked only ``real_prefix`` (its own NOTE described the
  ``base_prefix`` case but never tested it), so stdlib venvs were missed.
  """
  if hasattr(sys, 'real_prefix'):
    return True
  # getattr keeps Python 2 (no base_prefix) working: falls back to sys.prefix,
  # making the comparison False there.
  return getattr(sys, 'base_prefix', sys.prefix) != sys.prefix
| 37.066667 | 77 | 0.735612 | 90 | 556 | 4.488889 | 0.688889 | 0.024752 | 0.039604 | 0.064356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017354 | 0.170863 | 556 | 14 | 78 | 39.714286 | 0.859002 | 0.820144 | 0 | 0 | 0 | 0 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0.333333 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
80f4c5da06f46f91e5327a5ff34e96af0a3891e2 | 1,015 | py | Python | tests/test_mixin.py | thruflo/pyramid_basemodel | 4fd264072b07b96f2413d7199a5c25f9229c9db0 | [
"Unlicense"
] | 9 | 2015-04-08T08:25:34.000Z | 2020-07-20T11:59:49.000Z | tests/test_mixin.py | fizyk/pyramid_basemodel | 63c1f78ad2c3cd9b00579ec00b6855adbabb531a | [
"Unlicense"
] | 164 | 2020-07-31T12:49:48.000Z | 2022-03-29T04:09:28.000Z | tests/test_mixin.py | thruflo/pyramid_basemodel | 4fd264072b07b96f2413d7199a5c25f9229c9db0 | [
"Unlicense"
] | 8 | 2015-02-25T02:34:25.000Z | 2020-03-17T11:51:10.000Z | from mock import Mock, patch
from pyramid_basemodel.mixin import TouchMixin
def test_touch_mixin():
    """Check that touch() propagates by default, sets `modified`, and saves the instance."""
    mixin = TouchMixin()
    saved = []

    assert not hasattr(mixin, "modified")
    with patch.object(mixin, "propagate_touch") as propagate_mock:
        mixin.touch(now=Mock, save=saved.append)
    assert propagate_mock.called
    assert hasattr(mixin, "modified")
    assert mixin == saved[0]
def test_touch_mixin_no_propagate():
    """Check that touch(False, ...) skips propagation but still sets `modified` and saves."""
    mixin = TouchMixin()
    saved = []

    assert not hasattr(mixin, "modified")
    with patch.object(mixin, "propagate_touch") as propagate_mock:
        mixin.touch(False, now=Mock, save=saved.append)
    assert not propagate_mock.called
    assert hasattr(mixin, "modified")
    assert mixin == saved[0]
| 28.194444 | 81 | 0.683744 | 139 | 1,015 | 4.827338 | 0.28777 | 0.071535 | 0.09538 | 0.050671 | 0.825633 | 0.825633 | 0.751118 | 0.751118 | 0.751118 | 0.751118 | 0 | 0.002503 | 0.212808 | 1,015 | 35 | 82 | 29 | 0.837297 | 0.140887 | 0 | 0.666667 | 0 | 0 | 0.072009 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.166667 | false | 0 | 0.083333 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
038241c72f04812024e5fbe27494cb84d327adb2 | 43 | py | Python | pybrain/structure/networks/custom/__init__.py | sveilleux1/pybrain | 1e1de73142c290edb84e29ca7850835f3e7bca8b | [
"BSD-3-Clause"
] | 2,208 | 2015-01-02T02:14:41.000Z | 2022-03-31T04:45:46.000Z | pybrain/structure/networks/custom/__init__.py | sveilleux1/pybrain | 1e1de73142c290edb84e29ca7850835f3e7bca8b | [
"BSD-3-Clause"
] | 91 | 2015-01-08T16:42:16.000Z | 2021-12-11T19:16:35.000Z | pybrain/structure/networks/custom/__init__.py | sveilleux1/pybrain | 1e1de73142c290edb84e29ca7850835f3e7bca8b | [
"BSD-3-Clause"
] | 786 | 2015-01-02T15:18:20.000Z | 2022-02-23T23:42:40.000Z | from .capturegame import CaptureGameNetwork | 43 | 43 | 0.906977 | 4 | 43 | 9.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.069767 | 43 | 1 | 43 | 43 | 0.975 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
03b98d357c6c5972ca3b602fe8fc7c8312cd4440 | 44 | py | Python | extensions/__init__.py | apockill/webcam-ml | f001688cf891c44b407823cc866d8ae9bdc4c51b | [
"MIT"
] | null | null | null | extensions/__init__.py | apockill/webcam-ml | f001688cf891c44b407823cc866d8ae9bdc4c51b | [
"MIT"
] | null | null | null | extensions/__init__.py | apockill/webcam-ml | f001688cf891c44b407823cc866d8ae9bdc4c51b | [
"MIT"
] | null | null | null | from .only_masks import process as extension | 44 | 44 | 0.863636 | 7 | 44 | 5.285714 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.113636 | 44 | 1 | 44 | 44 | 0.948718 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
03c4a9ca9ba4c00d2dfa5e7c3472deed26895afd | 138 | py | Python | scripts/npc/autogen_9400101.py | hsienjan/SideQuest-Server | 3e88debaf45615b759d999255908f99a15283695 | [
"MIT"
] | null | null | null | scripts/npc/autogen_9400101.py | hsienjan/SideQuest-Server | 3e88debaf45615b759d999255908f99a15283695 | [
"MIT"
] | null | null | null | scripts/npc/autogen_9400101.py | hsienjan/SideQuest-Server | 3e88debaf45615b759d999255908f99a15283695 | [
"MIT"
] | null | null | null | # ParentID: 9400101
# ObjectID: 1000047
# Character field ID when accessed: 100000000
# Object Position X: 2525
# Object Position Y: -199
| 23 | 45 | 0.753623 | 18 | 138 | 5.777778 | 0.888889 | 0.269231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.26087 | 0.166667 | 138 | 5 | 46 | 27.6 | 0.643478 | 0.92029 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
03d569e282af33d46631001a92eba1ea9afd1b88 | 79 | py | Python | iotedgedev/version.py | vikas0212git/iotedgedev | ee6108b2cf8e9e006f83f19fcb1a94a65ffad93a | [
"MIT"
] | 111 | 2018-04-09T18:24:30.000Z | 2022-03-29T12:12:50.000Z | iotedgedev/version.py | nittaya1990/iotedgedev | d35c7d5d6112a1e26acb0104a577e59ea9378ca0 | [
"MIT"
] | 314 | 2018-04-09T19:59:27.000Z | 2022-03-28T12:13:45.000Z | iotedgedev/version.py | nittaya1990/iotedgedev | d35c7d5d6112a1e26acb0104a577e59ea9378ca0 | [
"MIT"
] | 45 | 2018-04-09T21:52:23.000Z | 2022-03-23T12:48:01.000Z | import sys
PY35 = sys.version_info >= (3, 5)
PY3 = sys.version_info >= (3, 0)
| 15.8 | 33 | 0.632911 | 14 | 79 | 3.428571 | 0.642857 | 0.416667 | 0.583333 | 0.625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.109375 | 0.189873 | 79 | 4 | 34 | 19.75 | 0.640625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
03ffa9cb762f8e66ed624a1c7a14ce90642c1c54 | 48 | py | Python | src/ekpmeasure/analysis/ppms/__init__.py | cjfinnell/ekpmeasure | e6611c053cad28e06f4f8a94764ebe3805cddb15 | [
"MIT"
] | null | null | null | src/ekpmeasure/analysis/ppms/__init__.py | cjfinnell/ekpmeasure | e6611c053cad28e06f4f8a94764ebe3805cddb15 | [
"MIT"
] | null | null | null | src/ekpmeasure/analysis/ppms/__init__.py | cjfinnell/ekpmeasure | e6611c053cad28e06f4f8a94764ebe3805cddb15 | [
"MIT"
] | null | null | null | from ._load import *
from ._data_funcs import *
| 16 | 26 | 0.75 | 7 | 48 | 4.714286 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 48 | 2 | 27 | 24 | 0.825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
209b697412176cfed64eb622863bf6f4820fe0a3 | 14,098 | py | Python | data_utils.py | plasmatiger/Adversary_ML | 604f936abed54aa623f197cb100bbad7a3763732 | [
"MIT"
] | null | null | null | data_utils.py | plasmatiger/Adversary_ML | 604f936abed54aa623f197cb100bbad7a3763732 | [
"MIT"
] | null | null | null | data_utils.py | plasmatiger/Adversary_ML | 604f936abed54aa623f197cb100bbad7a3763732 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import numpy as np
from PIL import Image
import skimage.io as skio
from slic import slic_gaussian
def read_image(address):
    """Read an image file as an RGB float array scaled to [0, 1].

    Args:
        address: Path to the image file.

    Returns:
        numpy float array of shape (height, width, 3) with values in [0, 1].
    """
    # Convert to RGB so grayscale/RGBA inputs also yield 3 channels.
    image = Image.open(address).convert("RGB")
    return np.array(image) / 255.0
def load_cifar(cache_dir):
    """Load the cached CIFAR10 arrays produced by ``cache_cifar``.

    Parameters
    ----------
    cache_dir : str
        Directory containing the ``cifar_*.npy`` files.

    Returns
    -------
    tuple of numpy.ndarray
        (images_train, images_test, images_val,
         labels_train, labels_test, labels_val)
    """
    images_train = np.load(os.path.join(cache_dir, "cifar_X_train.npy"))
    images_test = np.load(os.path.join(cache_dir, "cifar_X_test.npy"))
    images_val = np.load(os.path.join(cache_dir, "cifar_X_val.npy"))
    labels_train = np.load(os.path.join(cache_dir, "cifar_Y_train.npy"))
    labels_test = np.load(os.path.join(cache_dir, "cifar_Y_test.npy"))
    labels_val = np.load(os.path.join(cache_dir, "cifar_Y_val.npy"))
    return images_train, images_test, images_val, labels_train, labels_test, labels_val
def load_fashion_mnist(cache_dir):
    """Load the cached Fashion MNIST arrays produced by ``cache_fashion_mnist``.

    Parameters
    ----------
    cache_dir : str
        Directory containing the ``fashion_*.npy`` files.

    Returns
    -------
    tuple of numpy.ndarray
        (images_train, images_test, images_val,
         labels_train, labels_test, labels_val)
    """
    images_train = np.load(os.path.join(cache_dir, "fashion_X_train.npy"))
    images_test = np.load(os.path.join(cache_dir, "fashion_X_test.npy"))
    images_val = np.load(os.path.join(cache_dir, "fashion_X_val.npy"))
    labels_train = np.load(os.path.join(cache_dir, "fashion_Y_train.npy"))
    labels_test = np.load(os.path.join(cache_dir, "fashion_Y_test.npy"))
    labels_val = np.load(os.path.join(cache_dir, "fashion_Y_val.npy"))
    return images_train, images_test, images_val, labels_train, labels_test, labels_val
def load_cifar_sp(cache_dir):
    """Load the cached superpixel (SLIC) CIFAR10 arrays produced by
    ``cache_cifar_sp``.

    Parameters
    ----------
    cache_dir : str
        Directory containing the ``cifar_sp_*.npy`` files.

    Returns
    -------
    tuple of numpy.ndarray
        (images_train, images_test, images_val,
         labels_train, labels_test, labels_val)
    """
    images_train = np.load(os.path.join(cache_dir, "cifar_sp_X_train.npy"))
    images_test = np.load(os.path.join(cache_dir, "cifar_sp_X_test.npy"))
    images_val = np.load(os.path.join(cache_dir, "cifar_sp_X_val.npy"))
    labels_train = np.load(os.path.join(cache_dir, "cifar_sp_Y_train.npy"))
    labels_test = np.load(os.path.join(cache_dir, "cifar_sp_Y_test.npy"))
    labels_val = np.load(os.path.join(cache_dir, "cifar_sp_Y_val.npy"))
    return images_train, images_test, images_val, labels_train, labels_test, labels_val
def load_fashion_mnist_sp(cache_dir):
    """Load the cached superpixel (SLIC) Fashion MNIST arrays produced by
    ``cache_fashion_sp``.

    Parameters
    ----------
    cache_dir : str
        Directory containing the ``fashion_sp_*.npy`` files.

    Returns
    -------
    tuple of numpy.ndarray
        (images_train, images_test, images_val,
         labels_train, labels_test, labels_val)
    """
    images_train = np.load(os.path.join(cache_dir, "fashion_sp_X_train.npy"))
    images_test = np.load(os.path.join(cache_dir, "fashion_sp_X_test.npy"))
    images_val = np.load(os.path.join(cache_dir, "fashion_sp_X_val.npy"))
    labels_train = np.load(os.path.join(cache_dir, "fashion_sp_Y_train.npy"))
    labels_test = np.load(os.path.join(cache_dir, "fashion_sp_Y_test.npy"))
    labels_val = np.load(os.path.join(cache_dir, "fashion_sp_Y_val.npy"))
    return images_train, images_test, images_val, labels_train, labels_test, labels_val
def cache_cifar(base_dir, cache_dir):
    """Read the raw CIFAR10 images and cache them as ``cifar_*.npy`` arrays.

    Expected layout: ``<base_dir>train/`` and ``<base_dir>test/`` directories
    whose file names contain the class name (e.g. ``cat_001.png``). The 50k
    training images are shuffled and split into 40k train / 10k validation.
    Images are stored as float arrays in [0, 1]; labels are one-hot encoded.

    Parameters
    ----------
    base_dir : str
        Root of the raw image tree (note: joined with a trailing-slash-free
        convention, so it should end in ``/``).
    cache_dir : str
        Directory the ``.npy`` files are written into.
    """
    # Dataset metadata
    num_classes = 10
    train_size = 40000
    test_size = 10000
    val_size = 10000
    im_chan = 3
    im_size = 32
    clas_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
    clas_dict = {'airplane': 0, 'automobile': 1, 'bird': 2, 'cat': 3, 'deer': 4, 'dog': 5, 'frog': 6, 'horse': 7, 'ship': 8, 'truck': 9}
    all_train_img_add = []
    all_train_labels = []
    test_img_add = []
    test_labels = []
    # Collect image paths; the class is inferred from a substring match on
    # the file path, so we break on the first match to guarantee exactly one
    # label is appended per image (a second match would desync the lists).
    im_dir = base_dir + "train/"
    for root, dirs, files in os.walk(im_dir):
        for name in files:
            add = im_dir + name
            all_train_img_add.append(add)
            for clas in clas_names:
                if clas in add:
                    all_train_labels.append(clas_dict[clas])
                    break
    im_dir = base_dir + "test/"
    for root, dirs, files in os.walk(im_dir):
        for name in files:
            add = im_dir + name
            test_img_add.append(add)
            for clas in clas_names:
                if clas in add:
                    test_labels.append(clas_dict[clas])
                    break
    # Shuffle the training pool and split it into train / val
    perm = list(range(len(all_train_img_add)))
    np.random.shuffle(perm)
    train_img_add = [all_train_img_add[i] for i in perm[:train_size]]
    train_labels = [all_train_labels[i] for i in perm[:train_size]]
    val_img_add = [all_train_img_add[i] for i in perm[train_size:train_size + val_size]]
    val_labels = [all_train_labels[i] for i in perm[train_size:train_size + val_size]]

    def _read_split(img_adds, labels, size):
        # Load `size` images into one float array and one-hot encode labels.
        images = np.ndarray(shape=(size, im_size, im_size, im_chan))
        onehot = np.zeros(shape=(size, num_classes))
        for ind in range(size):
            images[ind, :, :, :] = read_image(img_adds[ind])
            onehot[ind, labels[ind]] = 1
        return images, onehot

    images_train, labels_train = _read_split(train_img_add, train_labels, train_size)
    images_test, labels_test = _read_split(test_img_add, test_labels, test_size)
    images_val, labels_val = _read_split(val_img_add, val_labels, val_size)
    # Save as numpy arrays
    np.save(cache_dir + "/cifar_X_train.npy", images_train)
    np.save(cache_dir + "/cifar_X_test.npy", images_test)
    np.save(cache_dir + "/cifar_X_val.npy", images_val)
    np.save(cache_dir + "/cifar_Y_train.npy", labels_train)
    np.save(cache_dir + "/cifar_Y_test.npy", labels_test)
    np.save(cache_dir + "/cifar_Y_val.npy", labels_val)
def cache_fashion_mnist(base_dir, cache_dir):
    """Read the raw Fashion MNIST images and cache them as ``fashion_*.npy``
    arrays in ``cache_dir``.

    Expected layout: ``<base_dir>/<split>/<class>/<image files>`` where
    ``split`` is one of ``train``, ``test`` or ``val`` and ``class`` is the
    digit string ``'0'``-``'9'``. Images are stored as float arrays in
    [0, 1] (converted to RGB by ``read_image``); labels are one-hot encoded.
    """
    # Dataset metadata
    num_classes = 10
    train_size = 50000
    test_size = 10000
    val_size = 10000
    im_chan = 3  # read_image converts everything to RGB
    im_size = 28
    classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    train_img_add, train_labels = [], []
    test_img_add, test_labels = [], []
    val_img_add, val_labels = [], []
    splits = [
        ("/train/", train_img_add, train_labels),
        ("/test/", test_img_add, test_labels),
        ("/val/", val_img_add, val_labels),
    ]
    # Collect image paths per class for each split; the class index is the
    # position of the class directory name in `classes`.
    for ind, clas in enumerate(classes):
        for split, img_adds, labels in splits:
            im_dir = base_dir + split + clas
            for root, dirs, files in os.walk(im_dir):
                for name in files:
                    img_adds.append(im_dir + "/" + name)
                    labels.append(ind)

    def _read_split(img_adds, labels, size):
        # Load `size` images into one float array and one-hot encode labels.
        images = np.ndarray(shape=(size, im_size, im_size, im_chan))
        onehot = np.zeros(shape=(size, num_classes))
        for ind in range(size):
            images[ind, :, :, :] = read_image(img_adds[ind])
            onehot[ind, labels[ind]] = 1
        return images, onehot

    images_train, labels_train = _read_split(train_img_add, train_labels, train_size)
    images_test, labels_test = _read_split(test_img_add, test_labels, test_size)
    images_val, labels_val = _read_split(val_img_add, val_labels, val_size)
    # Save as numpy arrays
    np.save(cache_dir + "/fashion_X_train.npy", images_train)
    np.save(cache_dir + "/fashion_X_test.npy", images_test)
    np.save(cache_dir + "/fashion_X_val.npy", images_val)
    np.save(cache_dir + "/fashion_Y_train.npy", labels_train)
    np.save(cache_dir + "/fashion_Y_test.npy", labels_test)
    np.save(cache_dir + "/fashion_Y_val.npy", labels_val)
def cache_cifar_sp(X_train, X_test, X_val, Y_train, Y_test, Y_val, cache_dir):
    """Build superpixel (SLIC) versions of the CIFAR arrays and cache them
    as ``cifar_sp_*.npy`` files in ``cache_dir``.

    Each image is replaced by its SLIC segmentation; the test images are
    additionally written out as PNG samples under ``./sample/slic/cifar/``.
    Labels are copied through unchanged.
    """
    slic_args = {
        'n_segments' : 256,
        'compactness' : 5,
        'sigma' : 1,
        'gaussian_filter' : 0
    }
    images_train = np.ndarray(shape=X_train.shape)
    images_test = np.ndarray(shape=X_test.shape)
    images_val = np.ndarray(shape=X_val.shape)
    for idx in range(X_train.shape[0]):
        # Generating SLIC images
        images_train[idx, :, :, :] = slic_gaussian(image=X_train[idx, :, :, :], args=slic_args)
    print("Loop1")
    for idx in range(X_test.shape[0]):
        # Generating SLIC images (and dumping PNG samples for inspection)
        images_test[idx, :, :, :] = slic_gaussian(image=X_test[idx, :, :, :], args=slic_args)
        ima = images_test[idx, :, :, :]
        skio.imsave("./sample/slic/cifar/" + str(idx) + ".png", ima)
    print("Loop2")
    for idx in range(X_val.shape[0]):
        # Generating SLIC images
        images_val[idx, :, :, :] = slic_gaussian(image=X_val[idx, :, :, :], args=slic_args)
    print("Loop3")
    labels_train = np.copy(Y_train)
    labels_test = np.copy(Y_test)
    labels_val = np.copy(Y_val)
    # Save as numpy array ================================================
    np.save(cache_dir + "/cifar_sp_X_train.npy", images_train)
    np.save(cache_dir + "/cifar_sp_X_test.npy" , images_test)
    np.save(cache_dir + "/cifar_sp_X_val.npy" , images_val)
    np.save(cache_dir + "/cifar_sp_Y_train.npy", labels_train)
    np.save(cache_dir + "/cifar_sp_Y_test.npy" , labels_test)
    np.save(cache_dir + "/cifar_sp_Y_val.npy" , labels_val)
def cache_fashion_sp(X_train, X_test, X_val, Y_train, Y_test, Y_val, cache_dir):
    """Build superpixel (SLIC) versions of the Fashion MNIST arrays and cache
    them as ``fashion_sp_*.npy`` files in ``cache_dir``.

    Each image is replaced by its SLIC segmentation; the test images are
    additionally written out as PNG samples under ``./sample/slic/fashion/``.
    Labels are copied through unchanged.
    """
    slic_args = {
        'n_segments' : 256,
        'compactness' : 5,
        'sigma' : 1,
        'gaussian_filter' : 0
    }
    images_train = np.ndarray(shape=X_train.shape)
    images_test = np.ndarray(shape=X_test.shape)
    images_val = np.ndarray(shape=X_val.shape)
    for idx in range(X_train.shape[0]):
        # Generating SLIC images
        images_train[idx, :, :, :] = slic_gaussian(image=X_train[idx, :, :, :], args=slic_args)
    print("Loop1")
    for idx in range(X_test.shape[0]):
        # Generating SLIC images (and dumping PNG samples for inspection)
        images_test[idx, :, :, :] = slic_gaussian(image=X_test[idx, :, :, :], args=slic_args)
        ima = images_test[idx, :, :, :]
        skio.imsave("./sample/slic/fashion/" + str(idx) + ".png", ima)
    print("Loop2")
    for idx in range(X_val.shape[0]):
        # Generating SLIC images
        images_val[idx, :, :, :] = slic_gaussian(image=X_val[idx, :, :, :], args=slic_args)
    print("Loop3")
    labels_train = np.copy(Y_train)
    labels_test = np.copy(Y_test)
    labels_val = np.copy(Y_val)
    # Save as numpy array ================================================
    np.save(cache_dir + "/fashion_sp_X_train.npy", images_train)
    np.save(cache_dir + "/fashion_sp_X_test.npy" , images_test)
    np.save(cache_dir + "/fashion_sp_X_val.npy" , images_val)
    np.save(cache_dir + "/fashion_sp_Y_train.npy", labels_train)
    np.save(cache_dir + "/fashion_sp_Y_test.npy" , labels_test)
    np.save(cache_dir + "/fashion_sp_Y_val.npy" , labels_val)
'''
if __name__ == "__main__":
print("Started!")
X_train, X_test, X_val, Y_train, Y_test, Y_val = load_cifar("./cache")
print("Loaded!")
cache_cifar_sp(X_train, X_test, X_val, Y_train, Y_test, Y_val, "./cache")
print("Done!")
''' | 34.638821 | 126 | 0.591219 | 1,994 | 14,098 | 3.869107 | 0.077733 | 0.058069 | 0.034219 | 0.043552 | 0.903694 | 0.88814 | 0.886455 | 0.865327 | 0.848736 | 0.844329 | 0 | 0.016923 | 0.228756 | 14,098 | 407 | 127 | 34.638821 | 0.692633 | 0.14534 | 0 | 0.623134 | 0 | 0 | 0.107757 | 0.031643 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033582 | false | 0 | 0.033582 | 0 | 0.085821 | 0.026119 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
20b4f05855979e6da3839e203e556656092ea923 | 205 | py | Python | prestify/__init__.py | omarkhd/prestify-client-py | 8a7f08dde2a0986fff56bcbcbbdf61713f156667 | [
"MIT"
] | null | null | null | prestify/__init__.py | omarkhd/prestify-client-py | 8a7f08dde2a0986fff56bcbcbbdf61713f156667 | [
"MIT"
] | null | null | null | prestify/__init__.py | omarkhd/prestify-client-py | 8a7f08dde2a0986fff56bcbcbbdf61713f156667 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from prestify.client import Report
# Optional Django integration: if Django is installed and its settings
# define PRESTIFY_SERVICE_URL, use that value; otherwise (Django missing,
# or the setting absent/unconfigured) leave Report.PRESTIFY_SERVICE_URL
# unchanged.
try:
    from django.conf import settings
    Report.PRESTIFY_SERVICE_URL = settings.PRESTIFY_SERVICE_URL
except (ImportError, AttributeError):
    pass
| 20.5 | 60 | 0.780488 | 26 | 205 | 6 | 0.692308 | 0.192308 | 0.230769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005587 | 0.126829 | 205 | 9 | 61 | 22.777778 | 0.865922 | 0.102439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.166667 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 6 |
20bb77e2f6bb8436105a15ba9425d049ae1d93ae | 1,459 | py | Python | starwars/views.py | chrischongyj/NTU-Star-Wars | f26447035e6e18ad358787f4dd3f4116ac8e437f | [
"MIT"
] | null | null | null | starwars/views.py | chrischongyj/NTU-Star-Wars | f26447035e6e18ad358787f4dd3f4116ac8e437f | [
"MIT"
] | 6 | 2020-06-06T00:02:01.000Z | 2022-02-10T13:49:06.000Z | starwars/views.py | chrischongyj/NTU-Star-Wars | f26447035e6e18ad358787f4dd3f4116ac8e437f | [
"MIT"
] | null | null | null | from django.shortcuts import render
from .models import CourseInfo
def welcome(request):
    """Render the landing page."""
    context = {'title': 'Welcome to End Star Wars'}
    return render(request, './starwars/welcome.html', context)
def havecourse(request):
    """Render the course test page."""
    template_name = './starwars/test.html'
    return render(request, template_name)
def addcourse(request):
    """Create a CourseInfo record from the POSTed form fields, then render
    the full course list."""
    posted = request.POST
    new_course = CourseInfo(
        course_name=posted["course_name"],
        course_code=posted["course_code"],
        course_au=posted["course_au"],
    )
    new_course.save()
    context = {'Courses': CourseInfo.objects.all()}
    return render(request, './starwars/want.html', context)
def selectcourse(request):
    """Delete the CourseInfo record matching the POSTed form fields, then
    render the remaining course list."""
    posted = request.POST
    matching = CourseInfo.objects.get(
        course_name=posted["course_name"],
        course_code=posted["course_code"],
        course_au=posted["course_au"],
    )
    matching.delete()
    context = {'Courses': CourseInfo.objects.all()}
    return render(request, './starwars/want.html', context)
def wantcourse(request):
    """Render the list of all courses.

    Previously contained commented-out deletion logic (a copy of
    ``selectcourse``); removed as dead code.
    """
    all_courses = CourseInfo.objects.all()
    return render(request, './starwars/want.html', {'Courses': all_courses})
| 30.395833 | 106 | 0.7183 | 188 | 1,459 | 5.340426 | 0.202128 | 0.119522 | 0.15239 | 0.134462 | 0.808765 | 0.741036 | 0.741036 | 0.741036 | 0.741036 | 0.741036 | 0 | 0 | 0.148047 | 1,459 | 47 | 107 | 31.042553 | 0.807723 | 0.132968 | 0 | 0.48 | 0 | 0 | 0.170906 | 0.018283 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.08 | 0.08 | 0.48 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
20f3987a5b3d131301ddcc31f04b60919e68ca94 | 9,381 | py | Python | app/tests/teams_tests/test_views.py | Tommos0/grand-challenge.org | 187cd857f6a7c9651b7bda8c42c54801f071dd7c | [
"Apache-2.0"
] | null | null | null | app/tests/teams_tests/test_views.py | Tommos0/grand-challenge.org | 187cd857f6a7c9651b7bda8c42c54801f071dd7c | [
"Apache-2.0"
] | null | null | null | app/tests/teams_tests/test_views.py | Tommos0/grand-challenge.org | 187cd857f6a7c9651b7bda8c42c54801f071dd7c | [
"Apache-2.0"
] | null | null | null | import pytest
from django.conf import settings
from django.test import Client
from tests.factories import TeamFactory, TeamMemberFactory
from tests.utils import (
get_view_for_user,
assert_viewname_status,
assert_viewname_redirect,
validate_admin_or_participant_view,
validate_open_view,
)
def validate_owner_or_admin_view(
    *, two_challenge_set, client: Client, **kwargs
):
    """ Assert that a view is only accessible to the owner of the object
    under test or to administrators of that particular challenge
    (a participant who is not the owner gets a 403) """
    # No user: anonymous requests are redirected to the login page
    assert_viewname_redirect(
        redirect_url=settings.LOGIN_URL,
        challenge=two_challenge_set.ChallengeSet1.challenge,
        client=client,
        **kwargs,
    )
    # (expected status code, requesting user) pairs. ChallengeSet1.participant
    # is presumably the owner of the object under test — confirm at call sites.
    tests = [
        (403, two_challenge_set.ChallengeSet1.non_participant),
        (200, two_challenge_set.ChallengeSet1.participant),
        (403, two_challenge_set.ChallengeSet1.participant1),
        (200, two_challenge_set.ChallengeSet1.creator),
        (200, two_challenge_set.ChallengeSet1.admin),
        (403, two_challenge_set.ChallengeSet2.non_participant),
        (403, two_challenge_set.ChallengeSet2.participant),
        (403, two_challenge_set.ChallengeSet2.participant1),
        (403, two_challenge_set.ChallengeSet2.creator),
        (403, two_challenge_set.ChallengeSet2.admin),
        (200, two_challenge_set.admin12),
        (403, two_challenge_set.participant12),
        (200, two_challenge_set.admin1participant2),
    ]
    for test in tests:
        assert_viewname_status(
            code=test[0],
            challenge=two_challenge_set.ChallengeSet1.challenge,
            client=client,
            user=test[1],
            **kwargs,
        )
def validate_member_owner_or_admin_view(
    *, two_challenge_set, client: Client, **kwargs
):
    """ Assert that a view is only accessible to the member concerned, the
    team owner, or administrators of that particular challenge (unlike
    validate_owner_or_admin_view, participant1 is also allowed here) """
    # No user: anonymous requests are redirected to the login page
    assert_viewname_redirect(
        redirect_url=settings.LOGIN_URL,
        challenge=two_challenge_set.ChallengeSet1.challenge,
        client=client,
        **kwargs,
    )
    # (expected status code, requesting user) pairs. Both participant and
    # participant1 get 200 — presumably owner and member respectively;
    # confirm at call sites.
    tests = [
        (403, two_challenge_set.ChallengeSet1.non_participant),
        (200, two_challenge_set.ChallengeSet1.participant),
        (200, two_challenge_set.ChallengeSet1.participant1),
        (200, two_challenge_set.ChallengeSet1.creator),
        (200, two_challenge_set.ChallengeSet1.admin),
        (403, two_challenge_set.ChallengeSet2.non_participant),
        (403, two_challenge_set.ChallengeSet2.participant),
        (403, two_challenge_set.ChallengeSet2.participant1),
        (403, two_challenge_set.ChallengeSet2.creator),
        (403, two_challenge_set.ChallengeSet2.admin),
        (200, two_challenge_set.admin12),
        (403, two_challenge_set.participant12),
        (200, two_challenge_set.admin1participant2),
    ]
    for test in tests:
        assert_viewname_status(
            code=test[0],
            challenge=two_challenge_set.ChallengeSet1.challenge,
            client=client,
            user=test[1],
            **kwargs,
        )
@pytest.mark.django_db
@pytest.mark.parametrize(
    "view", ["teams:list", "teams:create", "teams:member-create"]
)
def test_admin_or_participant_permissions(client, TwoChallengeSets, view):
    """Team views must only be reachable by challenge admins/participants."""
    set1 = TwoChallengeSets.ChallengeSet1
    team = TeamFactory(challenge=set1.challenge, owner=set1.participant)
    # Detail-style views need a team pk; list/create views do not
    pk = team.pk if view in ("teams:detail", "teams:member-create") else None
    validate_admin_or_participant_view(
        viewname=view,
        reverse_kwargs={"pk": pk},
        two_challenge_set=TwoChallengeSets,
        client=client,
    )
@pytest.mark.django_db
def test_open_views(client, ChallengeSet):
    """The team detail page is open to everyone."""
    owner = ChallengeSet.participant
    team = TeamFactory(challenge=ChallengeSet.challenge, owner=owner)
    validate_open_view(
        viewname="teams:detail",
        reverse_kwargs={"pk": team.pk},
        challenge_set=ChallengeSet,
        client=client,
    )
@pytest.mark.django_db
@pytest.mark.parametrize("view", ["teams:update", "teams:delete"])
def test_team_update_delete_permissions(client, TwoChallengeSets, view):
    """Only the team owner or challenge admins may update/delete a team."""
    set1 = TwoChallengeSets.ChallengeSet1
    team = TeamFactory(challenge=set1.challenge, owner=set1.participant)
    # A second team owned by participant1, who must still be denied access
    # to the first team's update/delete views
    TeamFactory(challenge=set1.challenge, owner=set1.participant1)
    validate_owner_or_admin_view(
        viewname=view,
        reverse_kwargs={"pk": team.pk},
        two_challenge_set=TwoChallengeSets,
        client=client,
    )
@pytest.mark.django_db
def test_team_member_delete_permissions(client, TwoChallengeSets):
    """A membership may be removed by the member, team owner, or admins."""
    set1 = TwoChallengeSets.ChallengeSet1
    team = TeamFactory(challenge=set1.challenge, owner=set1.participant)
    membership = TeamMemberFactory(team=team, user=set1.participant1)
    validate_member_owner_or_admin_view(
        viewname="teams:member-delete",
        reverse_kwargs={"pk": membership.pk},
        two_challenge_set=TwoChallengeSets,
        client=client,
    )
@pytest.mark.django_db
@pytest.mark.parametrize("team_name", ["test_team_name"])
def test_team_creation(client, TwoChallengeSets, team_name):
    """A participant can create a team and is redirected to a page that
    shows the new team's name."""
    set1 = TwoChallengeSets.ChallengeSet1
    response = get_view_for_user(
        viewname="teams:create",
        challenge=set1.challenge,
        client=client,
        method=client.post,
        user=set1.participant,
        data={"name": team_name},
    )
    assert response.status_code == 302
    # Follow the redirect and check the team appears on the target page
    response = get_view_for_user(
        url=response.url, client=client, user=set1.participant
    )
    assert response.status_code == 200
    assert team_name in response.rendered_content
@pytest.mark.django_db
def test_team_member_addition(client, TwoChallengeSets):
    """POSTing to member-create adds the requesting user to the team."""
    set1 = TwoChallengeSets.ChallengeSet1
    team = TeamFactory(challenge=set1.challenge, owner=set1.participant)
    members = team.get_members()
    assert set1.participant in members
    assert set1.participant1 not in members
    # Participant1 requests to join team
    response = get_view_for_user(
        viewname="teams:member-create",
        challenge=set1.challenge,
        client=client,
        method=client.post,
        user=set1.participant1,
        reverse_kwargs={"pk": team.pk},
    )
    assert set1.participant1 in team.get_members()
    assert response.status_code == 302
@pytest.mark.django_db
def test_unique_membership(client, TwoChallengeSets):
    """A user may belong to at most one team per challenge, but may own or
    join teams across different challenges."""
    set1 = TwoChallengeSets.ChallengeSet1
    team = TeamFactory(challenge=set1.challenge, owner=set1.participant)
    team1 = TeamFactory(challenge=set1.challenge, owner=set1.participant1)
    # Already owning a team: creating a second one must be denied
    response = get_view_for_user(
        viewname="teams:create",
        challenge=set1.challenge,
        client=client,
        method=client.post,
        user=set1.participant,
        data={"name": "thisteamshouldnotbecreated"},
    )
    assert response.status_code == 200
    assert (
        "You are already a member of another team for this challenge"
        in response.rendered_content
    )
    # Participant1 already owns team1, so joining team must be denied too
    response = get_view_for_user(
        viewname="teams:member-create",
        challenge=set1.challenge,
        client=client,
        method=client.post,
        user=set1.participant1,
        reverse_kwargs={"pk": team.pk},
    )
    assert response.status_code == 200
    assert (
        "You are already a member of another team for this challenge"
        in response.rendered_content
    )
    # participant12 may create a team in challenge 2 and also join one in
    # challenge 1
    response = get_view_for_user(
        viewname="teams:create",
        challenge=TwoChallengeSets.ChallengeSet2.challenge,
        client=client,
        method=client.post,
        user=TwoChallengeSets.participant12,
        data={"name": "thisteamshouldbecreated"},
    )
    assert response.status_code == 302
    response = get_view_for_user(
        viewname="teams:member-create",
        challenge=set1.challenge,
        client=client,
        method=client.post,
        user=TwoChallengeSets.participant12,
        reverse_kwargs={"pk": team.pk},
    )
    assert response.status_code == 302
    assert TwoChallengeSets.participant12 in team.get_members()
    # ...but joining a second team within challenge 1 is denied
    response = get_view_for_user(
        viewname="teams:member-create",
        challenge=set1.challenge,
        client=client,
        method=client.post,
        user=TwoChallengeSets.participant12,
        reverse_kwargs={"pk": team1.pk},
    )
    assert response.status_code == 200
    assert (
        "You are already a member of another team for this challenge"
        in response.rendered_content
    )
| 34.237226 | 87 | 0.694169 | 969 | 9,381 | 6.505676 | 0.114551 | 0.068528 | 0.08328 | 0.04283 | 0.804886 | 0.76618 | 0.739689 | 0.739689 | 0.732551 | 0.66323 | 0 | 0.026817 | 0.216928 | 9,381 | 273 | 88 | 34.362637 | 0.831337 | 0.046157 | 0 | 0.643443 | 0 | 0 | 0.058731 | 0.005492 | 0 | 0 | 0 | 0 | 0.090164 | 1 | 0.036885 | false | 0 | 0.020492 | 0 | 0.057377 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
456d22797f770af0d6c3e3c1d73e86251ceb6241 | 28 | py | Python | src/config/__init__.py | supernlogn/squeezeDetTL | 473be9c6c9081c6b1bd5622fbed4af6453576895 | [
"MIT"
] | 10 | 2018-11-13T14:18:11.000Z | 2020-04-29T09:35:47.000Z | src/config/__init__.py | supernlogn/squeezeDetTL | 473be9c6c9081c6b1bd5622fbed4af6453576895 | [
"MIT"
] | 3 | 2018-12-26T06:10:09.000Z | 2021-11-23T22:23:10.000Z | src/config/__init__.py | supernlogn/squeezeDetTL | 473be9c6c9081c6b1bd5622fbed4af6453576895 | [
"MIT"
] | 1 | 2020-12-08T12:36:43.000Z | 2020-12-08T12:36:43.000Z | from config_cooker import *
| 14 | 27 | 0.821429 | 4 | 28 | 5.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 28 | 1 | 28 | 28 | 0.916667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
45768d9dbdaa1a29d486a1885c16b7e4c2f4c7ae | 20,208 | py | Python | graphs/models/denseblock.py | hagerrady13/CondenseNet-PyTorch | 93fb0d4b79d50f26988a50ed1e53f0df68f80264 | [
"MIT"
] | 8 | 2018-07-30T06:49:06.000Z | 2021-07-28T15:18:40.000Z | graphs/models/denseblock.py | hagerrady13/CondenseNet-PyTorch | 93fb0d4b79d50f26988a50ed1e53f0df68f80264 | [
"MIT"
] | 7 | 2019-07-23T08:03:59.000Z | 2022-03-11T23:30:25.000Z | graphs/models/denseblock.py | hagerrady13/CondenseNet-PyTorch | 93fb0d4b79d50f26988a50ed1e53f0df68f80264 | [
"MIT"
] | 3 | 2018-07-29T21:49:50.000Z | 2021-03-26T06:27:39.000Z | """
Definitions for custom blocks
"""
import torch
import torch.nn as nn
from graphs.models.layers import LearnedGroupConv
class DenseBlock(nn.Sequential):
    """A stack of ``num_layers`` DenseLayers.

    Layer ``i`` (0-based) receives ``in_channels + i * growth_rate`` input
    channels, since each preceding layer concatenates ``growth_rate`` new
    feature channels onto its input.
    """

    def __init__(self, num_layers, in_channels, growth_rate, config):
        super().__init__()
        for idx in range(num_layers):
            channels_in = in_channels + idx * growth_rate
            dense_layer = DenseLayer(
                in_channels=channels_in, growth_rate=growth_rate, config=config
            )
            self.add_module('dense_layer_%d' % (idx + 1), dense_layer)
class DenseLayer(nn.Module):
    """One dense layer: a learned-group 1x1 bottleneck convolution followed
    by BN, ReLU and a grouped 3x3 convolution; the layer's input is
    concatenated with the newly produced features along the channel axis.
    """

    def __init__(self, in_channels, growth_rate, config):
        super().__init__()
        self.config = config
        self.conv_bottleneck = self.config.conv_bottleneck
        self.group1x1 = self.config.group1x1
        self.group3x3 = self.config.group3x3
        self.condense_factor = self.config.condense_factor
        self.dropout_rate = self.config.dropout_rate
        bottleneck_channels = self.conv_bottleneck * growth_rate
        # 1x1 learned-group conv: in_channels -> bottleneck * growth_rate
        self.conv_1 = LearnedGroupConv(
            in_channels=in_channels,
            out_channels=bottleneck_channels,
            kernel_size=1,
            groups=self.group1x1,
            condense_factor=self.condense_factor,
            dropout_rate=self.dropout_rate,
        )
        self.batch_norm = nn.BatchNorm2d(bottleneck_channels)
        self.relu = nn.ReLU(inplace=True)
        # 3x3 grouped conv: bottleneck * growth_rate -> growth_rate
        self.conv_2 = nn.Conv2d(
            in_channels=bottleneck_channels,
            out_channels=growth_rate,
            kernel_size=3,
            padding=1,
            stride=1,
            groups=self.group3x3,
            bias=False,
        )

    def forward(self, x):
        # Bottleneck stack, then concatenate input and new features on dim 1
        new_features = self.relu(self.batch_norm(self.conv_1(x)))
        new_features = self.conv_2(new_features)
        return torch.cat([x, new_features], 1)
"""
---------------------------------
(denseblock_one): DenseBlock(
(dense_layer_1): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(16, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(32, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_2): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(24, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(32, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_3): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(32, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(32, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_4): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(40, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(40, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(32, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_5): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(48, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(32, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_6): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(56, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(56, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(32, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_7): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(32, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_8): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(72, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(72, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(32, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_9): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(80, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(80, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(32, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_10): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(88, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(88, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(32, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_11): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(96, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(32, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_12): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(104, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(104, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(32, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_13): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(112, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(112, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(32, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_14): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(120, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(120, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(32, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
)
---------------------------------
(denseblock_two): DenseBlock(
(dense_layer_1): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(64, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_2): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(144, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(144, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(64, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_3): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(160, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(160, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(64, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_4): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(176, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(176, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(64, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_5): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(192, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(64, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_6): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(208, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(208, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(64, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_7): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(224, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(224, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(64, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_8): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(240, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(240, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(64, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_9): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(64, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_10): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(272, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(272, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(64, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_11): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(288, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(288, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(64, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_12): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(304, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(304, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(64, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_13): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(320, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(320, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(64, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_14): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(336, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(336, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(64, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
)
---------------------------------
(denseblock_three): DenseBlock(
(dense_layer_1): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(352, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(352, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_2): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(384, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_3): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(416, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(416, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_4): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(448, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(448, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_5): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(480, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(480, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_6): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_7): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(544, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(544, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_8): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(576, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_9): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(608, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(608, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_10): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(640, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(640, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_11): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(672, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(672, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_12): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(704, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(704, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_13): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(736, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(736, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
(dense_layer_14): DenseLayer(
(conv_1): LearnedGroupConv(
(batch_norm): BatchNorm2d(768, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv): Conv2d(768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(batch_norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU(inplace)
(conv_2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=4, bias=False)
)
)
---------------------------------
""" | 42.543158 | 175 | 0.652613 | 3,139 | 20,208 | 4.071997 | 0.040459 | 0.026287 | 0.131435 | 0.098576 | 0.905023 | 0.902832 | 0.897199 | 0.891723 | 0.891723 | 0.891723 | 0 | 0.105053 | 0.145982 | 20,208 | 475 | 176 | 42.543158 | 0.635589 | 0.006235 | 0 | 0.068966 | 0 | 0 | 0.00861 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.103448 | 0 | 0.310345 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
45a070175c1cc76870a9c06ccac2945680ec224c | 11,193 | py | Python | diffxpy/unit_test/test_pairwise.py | SabrinaRichter/diffxpy | 8eff054ca3ce097533134f490aac3580431eee15 | [
"BSD-3-Clause"
] | null | null | null | diffxpy/unit_test/test_pairwise.py | SabrinaRichter/diffxpy | 8eff054ca3ce097533134f490aac3580431eee15 | [
"BSD-3-Clause"
] | null | null | null | diffxpy/unit_test/test_pairwise.py | SabrinaRichter/diffxpy | 8eff054ca3ce097533134f490aac3580431eee15 | [
"BSD-3-Clause"
] | null | null | null | import logging
import unittest
import numpy as np
import pandas as pd
import scipy.stats as stats
from batchglm.api.models.glm_nb import Simulator, Estimator, InputData
import diffxpy.api as de
class TestPairwiseNull(unittest.TestCase):
    """Null-model checks for ``de.test.pairwise``.

    Every supported pairwise test variant is run on data simulated without
    any group effect; the resulting p-values must be uniformly distributed,
    which is checked with a two-sided Kolmogorov-Smirnov test.
    """

    @staticmethod
    def _mute_loggers():
        """Silence noisy third-party loggers during the test run."""
        logging.getLogger("tensorflow").setLevel(logging.ERROR)
        logging.getLogger("batchglm").setLevel(logging.WARNING)
        logging.getLogger("diffxpy").setLevel(logging.WARNING)

    @staticmethod
    def _simulate(n_cells, n_genes):
        """Simulate a count matrix under the null model (no batches, no conditions)."""
        sim = Simulator(num_observations=n_cells, num_features=n_genes)
        sim.generate_sample_description(num_batches=0, num_conditions=0)
        sim.generate()
        return sim

    @staticmethod
    def _random_groups(sim, n_groups):
        """Assign each observation to a random group, independent of the data."""
        return pd.DataFrame({
            "condition": np.random.randint(n_groups, size=sim.num_observations)
        })

    @staticmethod
    def _offdiag(pval):
        """Return p-values of all distinct group pairs (off-diagonal entries)."""
        return pval[~np.eye(pval.shape[0]).astype(bool)]

    @staticmethod
    def _assert_uniform(pvals):
        """KS-test the observed p-values against the uniform distribution."""
        pval_h0 = stats.kstest(pvals.flatten(), 'uniform').pvalue
        logging.getLogger("diffxpy").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)
        assert pval_h0 > 0.05, "KS-Test failed: pval_h0 is <= 0.05!"

    def test_null_distribution_ztest(self, n_cells: int = 2000, n_genes: int = 100, n_groups=2):
        """
        Test that the pairwise z-test generates a uniform p-value distribution
        on data simulated under the null model.

        :param n_cells: Number of cells to simulate (number of observations per test).
        :param n_genes: Number of genes to simulate (number of tests).
        """
        self._mute_loggers()
        sim = self._simulate(n_cells, n_genes)
        test = de.test.pairwise(
            data=sim.X,
            grouping="condition",
            test="z-test",
            noise_model="nb",
            sample_description=self._random_groups(sim, n_groups),
            dtype="float64"
        )
        test.summary()  # smoke-check that the summary table can be built
        self._assert_uniform(self._offdiag(test.pval))
        return True

    def test_null_distribution_z_lazy(self, n_cells: int = 2000, n_genes: int = 100):
        """
        Test that the lazy pairwise z-test generates a uniform p-value
        distribution on data simulated under the null model.

        :param n_cells: Number of cells to simulate (number of observations per test).
        :param n_genes: Number of genes to simulate (number of tests).
        """
        self._mute_loggers()
        sim = self._simulate(n_cells, n_genes)
        test = de.test.pairwise(
            data=sim.X,
            grouping="condition",
            test='z-test',
            lazy=True,
            noise_model="nb",
            pval_correction="global",
            quick_scale=True,
            sample_description=self._random_groups(sim, 4),
            dtype="float64"
        )
        # Lazy mode only materializes p-values for the requested group pair.
        self._assert_uniform(test.pval_pairs(groups0=0, groups1=1))
        return True

    def test_null_distribution_lrt(self, n_cells: int = 2000, n_genes: int = 100, n_groups=2):
        """
        Test that the pairwise likelihood-ratio test generates a uniform
        p-value distribution on data simulated under the null model.

        :param n_cells: Number of cells to simulate (number of observations per test).
        :param n_genes: Number of genes to simulate (number of tests).
        """
        self._mute_loggers()
        sim = self._simulate(n_cells, n_genes)
        test = de.test.pairwise(
            data=sim.X,
            grouping="condition",
            test="lrt",
            noise_model="nb",
            sample_description=self._random_groups(sim, n_groups),
            dtype="float64"
        )
        self._assert_uniform(self._offdiag(test.pval))
        return True

    def test_null_distribution_ttest(self, n_cells: int = 2000, n_genes: int = 10000, n_groups=2):
        """
        Test that the pairwise t-test generates a uniform p-value distribution
        on data simulated under the null model.

        :param n_cells: Number of cells to simulate (number of observations per test).
        :param n_genes: Number of genes to simulate (number of tests).
        """
        self._mute_loggers()
        sim = self._simulate(n_cells, n_genes)
        test = de.test.pairwise(
            data=sim.X,
            grouping="condition",
            test="t-test",
            sample_description=self._random_groups(sim, n_groups),
        )
        test.summary()  # smoke-check that the summary table can be built
        self._assert_uniform(self._offdiag(test.pval))
        return True

    def test_null_distribution_wilcoxon(self, n_cells: int = 2000, n_genes: int = 10000, n_groups=2):
        """
        Test that the pairwise Wilcoxon test generates a uniform p-value
        distribution on data simulated under the null model.

        :param n_cells: Number of cells to simulate (number of observations per test).
        :param n_genes: Number of genes to simulate (number of tests).
        """
        self._mute_loggers()
        sim = self._simulate(n_cells, n_genes)
        test = de.test.pairwise(
            data=sim.X,
            grouping="condition",
            test="wilcoxon",
            sample_description=self._random_groups(sim, n_groups),
        )
        test.summary()  # smoke-check that the summary table can be built
        self._assert_uniform(self._offdiag(test.pval))
        return True
class TestPairwiseDE(unittest.TestCase):
    """Power check for ``de.test.pairwise``: DE genes should be detected."""

    def test_ztest_de(self, n_cells: int = 2000, n_genes: int = 500):
        """
        Test that the pairwise z-test detects differential expression: half of
        the simulated genes carry a true condition effect, the other half do
        not. The fractions of non-DE and DE genes called significant at
        q < 0.05 are logged.

        :param n_cells: Number of cells to simulate (number of observations per test).
        :param n_genes: Number of genes to simulate (number of tests).
        """
        logging.getLogger("tensorflow").setLevel(logging.ERROR)
        logging.getLogger("batchglm").setLevel(logging.WARNING)
        logging.getLogger("diffxpy").setLevel(logging.WARNING)

        num_non_de = n_genes // 2
        sim = Simulator(num_observations=n_cells, num_features=n_genes)
        sim.generate_sample_description(num_batches=0, num_conditions=2)
        # simulate: coefficients ~ log(N(1, 0.5)).
        # truncnorm re-samples so that N(1, 0.5) > 0.
        # np.inf instead of the deprecated np.infty (removed in NumPy 2.0).
        sim.generate_params(rand_fn=lambda shape: 1 + stats.truncnorm.rvs(-1 / 0.5, np.inf, scale=0.5, size=shape))
        # Zero out the condition effect for the first half of the genes.
        sim.params["a"][1, :num_non_de] = 0
        sim.params["b"][1, :num_non_de] = 0
        sim.params["isDE"] = ("features",), np.arange(n_genes) >= num_non_de
        sim.generate_data()

        sample_description = sim.sample_description
        test = de.test.pairwise(
            data=sim.X,
            grouping="condition",
            test="z-test",
            noise_model="nb",
            sample_description=sample_description,
        )
        test.summary()  # smoke-check that the summary table can be built

        logging.getLogger("diffxpy").info('fraction of non-DE genes with q-value < 0.05: %.1f%%' %
                                          float(100 * np.mean(
                                              np.sum(test.qval[~np.eye(test.pval.shape[0]).astype(bool), :num_non_de] < 0.05) /
                                              (2 * num_non_de)
                                          )))
        logging.getLogger("diffxpy").info('fraction of DE genes with q-value < 0.05: %.1f%%' %
                                          float(100 * np.mean(
                                              np.sum(test.qval[~np.eye(test.pval.shape[0]).astype(bool), num_non_de:] < 0.05) /
                                              (2 * (n_genes - num_non_de))
                                          )))
        # TODO asserts: turn the logged fractions into actual assertions once
        # acceptable false-positive / false-negative thresholds are agreed on.
        return True
# Allow running this test module directly: `python test_pairwise.py`.
if __name__ == '__main__':
    unittest.main()
| 41.921348 | 117 | 0.644867 | 1,453 | 11,193 | 4.836889 | 0.113558 | 0.056915 | 0.042544 | 0.030734 | 0.894991 | 0.886027 | 0.870518 | 0.865111 | 0.861412 | 0.857285 | 0 | 0.020105 | 0.248995 | 11,193 | 266 | 118 | 42.078947 | 0.815965 | 0.254891 | 0 | 0.692308 | 0 | 0 | 0.118808 | 0 | 0 | 0 | 0 | 0.003759 | 0.032051 | 1 | 0.038462 | false | 0 | 0.044872 | 0 | 0.134615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
45b57c3093db5cb3160495cb03f73c0f5ee13cb4 | 29 | py | Python | sierra_api/__init__.py | alexvancina/sierra-api | 6fd5c04ac39569367db361d6a9d356d5fa3eb00f | [
"MIT"
] | 2 | 2020-07-21T18:16:55.000Z | 2022-03-14T19:48:04.000Z | sierra_api/__init__.py | alexvancina/sierra-api | 6fd5c04ac39569367db361d6a9d356d5fa3eb00f | [
"MIT"
] | null | null | null | sierra_api/__init__.py | alexvancina/sierra-api | 6fd5c04ac39569367db361d6a9d356d5fa3eb00f | [
"MIT"
] | null | null | null | from .sierra import SierraAPI | 29 | 29 | 0.862069 | 4 | 29 | 6.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.103448 | 29 | 1 | 29 | 29 | 0.961538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
45b94aa8fe149085a3fe69fa5d36aa71d765346b | 43 | py | Python | plugins/flytekit-athena/flytekitplugins/athena/__init__.py | slai/flytekit | 9d73d096b748d263a638e6865d15db4880845305 | [
"Apache-2.0"
] | 1 | 2021-11-11T10:10:10.000Z | 2021-11-11T10:10:10.000Z | plugins/flytekit-athena/flytekitplugins/athena/__init__.py | slai/flytekit | 9d73d096b748d263a638e6865d15db4880845305 | [
"Apache-2.0"
] | 2 | 2021-06-26T04:32:43.000Z | 2021-07-14T04:47:52.000Z | plugins/flytekit-athena/flytekitplugins/athena/__init__.py | slai/flytekit | 9d73d096b748d263a638e6865d15db4880845305 | [
"Apache-2.0"
] | null | null | null | from .task import AthenaConfig, AthenaTask
| 21.5 | 42 | 0.837209 | 5 | 43 | 7.2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.116279 | 43 | 1 | 43 | 43 | 0.947368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
b31ff1ac651821b677bd0399142eae35fdd0bf45 | 146 | py | Python | backend/edw/signals/__init__.py | MMotionMan/django-edw | 0f686429d29e0f40409a3b2318664973b2844c08 | [
"BSD-3-Clause"
] | 4 | 2019-09-18T05:51:12.000Z | 2020-10-23T08:50:00.000Z | backend/edw/signals/__init__.py | Vvvnukova/django-edw | 18397c2e6e2d7ddebad4d83ffee16425e7ac4e9f | [
"BSD-3-Clause"
] | 10 | 2020-04-29T11:46:44.000Z | 2022-03-11T23:38:27.000Z | backend/edw/signals/__init__.py | Vvvnukova/django-edw | 18397c2e6e2d7ddebad4d83ffee16425e7ac4e9f | [
"BSD-3-Clause"
] | 13 | 2020-04-09T07:49:48.000Z | 2022-03-02T07:06:28.000Z | # -*- coding: utf-8 -*-
def make_dispatch_uid(*args):
return "::".join(map(lambda obj: obj if isinstance(obj, str) else str(id(obj)), args)) | 29.2 | 90 | 0.636986 | 23 | 146 | 3.956522 | 0.782609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008065 | 0.150685 | 146 | 5 | 90 | 29.2 | 0.725806 | 0.143836 | 0 | 0 | 0 | 0 | 0.016129 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
2fbf03fe000c1b98be97869022ca4eec73963dab | 247 | py | Python | modules/ui/introscreen.py | abstractdonut/primavista | c232cf2a67875233f677ee9d23dcc9227fc97a53 | [
"MIT"
] | null | null | null | modules/ui/introscreen.py | abstractdonut/primavista | c232cf2a67875233f677ee9d23dcc9227fc97a53 | [
"MIT"
] | null | null | null | modules/ui/introscreen.py | abstractdonut/primavista | c232cf2a67875233f677ee9d23dcc9227fc97a53 | [
"MIT"
] | null | null | null | from kivymd.uix.screen import MDScreen
class IntroScreen(MDScreen):
def goto_exercise(self):
self.manager.goto_exercise()
print("IntroScreen: goto_exercise")
def goto_choose(self):
self.manager.goto_choose()
| 22.454545 | 43 | 0.688259 | 29 | 247 | 5.689655 | 0.517241 | 0.218182 | 0.181818 | 0.230303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.214575 | 247 | 10 | 44 | 24.7 | 0.850515 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.285714 | false | 0 | 0.142857 | 0 | 0.571429 | 0.142857 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
2fcda9cf6d654da0edc4fb98bdd920c383ea0fad | 149 | py | Python | daskms/experimental/arrow/__init__.py | ratt-ru/dask-ms | becd3572f86a0ad78b55540f25fce6e129976a29 | [
"BSD-3-Clause"
] | 7 | 2019-08-23T03:44:53.000Z | 2021-05-06T00:51:18.000Z | daskms/experimental/arrow/__init__.py | ska-sa/dask-ms | ce33e7aad36eeb7c2c79093622b9776186856304 | [
"BSD-3-Clause"
] | 76 | 2019-08-20T14:34:05.000Z | 2022-02-10T13:21:29.000Z | daskms/experimental/arrow/__init__.py | ratt-ru/dask-ms | becd3572f86a0ad78b55540f25fce6e129976a29 | [
"BSD-3-Clause"
] | 4 | 2019-10-15T13:35:19.000Z | 2021-03-23T14:52:23.000Z | from daskms.experimental.arrow.reads import xds_from_parquet # noqa: F401
from daskms.experimental.arrow.writes import xds_to_parquet # noqa: F401
| 49.666667 | 74 | 0.825503 | 22 | 149 | 5.409091 | 0.545455 | 0.168067 | 0.369748 | 0.453782 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045113 | 0.107383 | 149 | 2 | 75 | 74.5 | 0.849624 | 0.14094 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
641fd55dc8eabd24d8b9c2110cb4d89d24cda46f | 9,353 | py | Python | test/test_markdown_emphasis_rule_10.py | scop/pymarkdown | 562ba8f7857d99ba09e86e42de5a37ec6d9b2c30 | [
"MIT"
] | null | null | null | test/test_markdown_emphasis_rule_10.py | scop/pymarkdown | 562ba8f7857d99ba09e86e42de5a37ec6d9b2c30 | [
"MIT"
] | null | null | null | test/test_markdown_emphasis_rule_10.py | scop/pymarkdown | 562ba8f7857d99ba09e86e42de5a37ec6d9b2c30 | [
"MIT"
] | null | null | null | """
https://github.github.com/gfm/#emphasis-and-strong-emphasis
"""
import pytest
from .utils import act_and_assert
@pytest.mark.gfm
def test_emphasis_431():
"""
Test case 431: (part 1) Any nonempty sequence of inline elements can be the contents of an strongly emphasized span.
"""
# Arrange
source_markdown = """**foo [bar](/url)**"""
expected_tokens = [
"[para(1,1):]",
"[emphasis(1,1):2:*]",
"[text(1,3):foo :]",
"[link(1,7):inline:/url:::::bar:False::::]",
"[text(1,8):bar:]",
"[end-link::]",
"[end-emphasis(1,18)::]",
"[end-para:::True]",
]
expected_gfm = """<p><strong>foo <a href="/url">bar</a></strong></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_emphasis_432():
"""
Test case 432: (part 2) Any nonempty sequence of inline elements can be the contents of an strongly emphasized span.
"""
# Arrange
source_markdown = """**foo
bar**"""
expected_tokens = [
"[para(1,1):\n]",
"[emphasis(1,1):2:*]",
"[text(1,3):foo\nbar::\n]",
"[end-emphasis(2,4)::]",
"[end-para:::True]",
]
expected_gfm = """<p><strong>foo
bar</strong></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_emphasis_433():
"""
Test case 433: (part 1) In particular, emphasis and strong emphasis can be nested inside strong emphasis:
"""
# Arrange
source_markdown = """__foo _bar_ baz__"""
expected_tokens = [
"[para(1,1):]",
"[emphasis(1,1):2:_]",
"[text(1,3):foo :]",
"[emphasis(1,7):1:_]",
"[text(1,8):bar:]",
"[end-emphasis(1,11)::]",
"[text(1,12): baz:]",
"[end-emphasis(1,16)::]",
"[end-para:::True]",
]
expected_gfm = """<p><strong>foo <em>bar</em> baz</strong></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_emphasis_434():
"""
Test case 434: (part 2) In particular, emphasis and strong emphasis can be nested inside strong emphasis:
"""
# Arrange
source_markdown = """__foo __bar__ baz__"""
expected_tokens = [
"[para(1,1):]",
"[emphasis(1,1):2:_]",
"[text(1,3):foo :]",
"[emphasis(1,7):2:_]",
"[text(1,9):bar:]",
"[end-emphasis(1,12)::]",
"[text(1,14): baz:]",
"[end-emphasis(1,18)::]",
"[end-para:::True]",
]
expected_gfm = """<p><strong>foo <strong>bar</strong> baz</strong></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_emphasis_435():
"""
Test case 435: (part 3) In particular, emphasis and strong emphasis can be nested inside strong emphasis:
"""
# Arrange
source_markdown = """____foo__ bar__"""
expected_tokens = [
"[para(1,1):]",
"[emphasis(1,1):2:_]",
"[emphasis(1,3):2:_]",
"[text(1,5):foo:]",
"[end-emphasis(1,8)::]",
"[text(1,10): bar:]",
"[end-emphasis(1,14)::]",
"[end-para:::True]",
]
expected_gfm = """<p><strong><strong>foo</strong> bar</strong></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_emphasis_436():
"""
Test case 436: (part 4) In particular, emphasis and strong emphasis can be nested inside strong emphasis:
"""
# Arrange
source_markdown = """**foo **bar****"""
expected_tokens = [
"[para(1,1):]",
"[emphasis(1,1):2:*]",
"[text(1,3):foo :]",
"[emphasis(1,7):2:*]",
"[text(1,9):bar:]",
"[end-emphasis(1,12)::]",
"[end-emphasis(1,14)::]",
"[end-para:::True]",
]
expected_gfm = """<p><strong>foo <strong>bar</strong></strong></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_emphasis_437():
"""
Test case 437: (part 5) In particular, emphasis and strong emphasis can be nested inside strong emphasis:
"""
# Arrange
source_markdown = """**foo *bar* baz**"""
expected_tokens = [
"[para(1,1):]",
"[emphasis(1,1):2:*]",
"[text(1,3):foo :]",
"[emphasis(1,7):1:*]",
"[text(1,8):bar:]",
"[end-emphasis(1,11)::]",
"[text(1,12): baz:]",
"[end-emphasis(1,16)::]",
"[end-para:::True]",
]
expected_gfm = """<p><strong>foo <em>bar</em> baz</strong></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_emphasis_438():
"""
Test case 438: (part 6) In particular, emphasis and strong emphasis can be nested inside strong emphasis:
"""
# Arrange
source_markdown = """**foo*bar*baz**"""
expected_tokens = [
"[para(1,1):]",
"[emphasis(1,1):2:*]",
"[text(1,3):foo:]",
"[emphasis(1,6):1:*]",
"[text(1,7):bar:]",
"[end-emphasis(1,10)::]",
"[text(1,11):baz:]",
"[end-emphasis(1,14)::]",
"[end-para:::True]",
]
expected_gfm = """<p><strong>foo<em>bar</em>baz</strong></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_emphasis_439():
"""
Test case 439: (part 7) In particular, emphasis and strong emphasis can be nested inside strong emphasis:
"""
# Arrange
source_markdown = """***foo* bar**"""
expected_tokens = [
"[para(1,1):]",
"[emphasis(1,1):2:*]",
"[emphasis(1,3):1:*]",
"[text(1,4):foo:]",
"[end-emphasis(1,7)::]",
"[text(1,8): bar:]",
"[end-emphasis(1,12)::]",
"[end-para:::True]",
]
expected_gfm = """<p><strong><em>foo</em> bar</strong></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_emphasis_440():
"""
Test case 440: (part 8) In particular, emphasis and strong emphasis can be nested inside strong emphasis:
"""
# Arrange
source_markdown = """**foo *bar***"""
expected_tokens = [
"[para(1,1):]",
"[emphasis(1,1):2:*]",
"[text(1,3):foo :]",
"[emphasis(1,7):1:*]",
"[text(1,8):bar:]",
"[end-emphasis(1,11)::]",
"[end-emphasis(1,12)::]",
"[end-para:::True]",
]
expected_gfm = """<p><strong>foo <em>bar</em></strong></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_emphasis_441():
"""
Test case 441: (part 1) Indefinite levels of nesting are possible:
"""
# Arrange
source_markdown = """**foo *bar **baz**
bim* bop**"""
expected_tokens = [
"[para(1,1):\n]",
"[emphasis(1,1):2:*]",
"[text(1,3):foo :]",
"[emphasis(1,7):1:*]",
"[text(1,8):bar :]",
"[emphasis(1,12):2:*]",
"[text(1,14):baz:]",
"[end-emphasis(1,17)::]",
"[text(1,19):\nbim::\n]",
"[end-emphasis(2,4)::]",
"[text(2,5): bop:]",
"[end-emphasis(2,9)::]",
"[end-para:::True]",
]
expected_gfm = """<p><strong>foo <em>bar <strong>baz</strong>
bim</em> bop</strong></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_emphasis_442():
"""
Test case 442: (part 2) Indefinite levels of nesting are possible:
"""
# Arrange
source_markdown = """**foo [*bar*](/url)**"""
expected_tokens = [
"[para(1,1):]",
"[emphasis(1,1):2:*]",
"[text(1,3):foo :]",
"[link(1,7):inline:/url:::::*bar*:False::::]",
"[emphasis(1,8):1:*]",
"[text(1,9):bar:]",
"[end-emphasis(1,12)::]",
"[end-link::]",
"[end-emphasis(1,20)::]",
"[end-para:::True]",
]
expected_gfm = """<p><strong>foo <a href="/url"><em>bar</em></a></strong></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_emphasis_443():
"""
Test case 443: (part 1) There can be no empty emphasis or strong emphasis:
"""
# Arrange
source_markdown = """__ is not an empty emphasis"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):__:]",
"[text(1,3): is not an empty emphasis:]",
"[end-para:::True]",
]
expected_gfm = """<p>__ is not an empty emphasis</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_emphasis_444():
"""
Test case 444: (part 2) There can be no empty emphasis or strong emphasis:
"""
# Arrange
source_markdown = """____ is not an empty strong emphasis"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):____:]",
"[text(1,5): is not an empty strong emphasis:]",
"[end-para:::True]",
]
expected_gfm = """<p>____ is not an empty strong emphasis</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
| 26.95389 | 121 | 0.539292 | 1,195 | 9,353 | 4.063598 | 0.086192 | 0.079695 | 0.049423 | 0.046129 | 0.885502 | 0.867586 | 0.855025 | 0.846582 | 0.839992 | 0.834638 | 0 | 0.048024 | 0.245269 | 9,353 | 346 | 122 | 27.031792 | 0.639892 | 0.185395 | 0 | 0.606481 | 0 | 0.00463 | 0.406564 | 0.116355 | 0 | 0 | 0 | 0 | 0.069444 | 1 | 0.064815 | false | 0 | 0.009259 | 0 | 0.074074 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ff671e86df34104d73fb75c375effa5a9b8a6df8 | 7,933 | py | Python | amspy/restfns.py | msleal/amspy | c691881c6b2bf9bae357a4cb327d71033426abed | [
"MIT"
] | 11 | 2016-09-08T19:28:45.000Z | 2020-10-13T00:31:57.000Z | amspy/restfns.py | msleal/amspy | c691881c6b2bf9bae357a4cb327d71033426abed | [
"MIT"
] | 4 | 2016-09-19T18:18:01.000Z | 2017-07-03T20:12:56.000Z | amspy/restfns.py | msleal/amspy | c691881c6b2bf9bae357a4cb327d71033426abed | [
"MIT"
] | 7 | 2016-09-10T18:43:10.000Z | 2019-12-05T11:05:21.000Z | """
Copyright (c) 2016, Marcelo Leal
Description: Simple Azure Media Services Python library
License: MIT (see LICENSE.txt file for details)
"""
# restfns - REST functions for amspy
import requests
import json
from .settings import json_acceptformat, json_only_acceptformat, xml_acceptformat, batch_acceptformat, charset, dsversion_min, dsversion_max, xmsversion
#Defaults
# do_auth(endpoint, body, access_token)
# do an HTTP POST request for authentication (acquire an access token) and return JSON
def do_auth(endpoint, body):
global dsversion_min, dsversion_max, json_acceptformat, json_acceptformat
min_ds = dsversion_min; max_ds = dsversion_max; content_acceptformat = json_acceptformat; acceptformat = json_acceptformat
headers = {"content-type": "application/x-www-form-urlencoded",
"Accept": acceptformat}
return requests.post(endpoint, data=body, headers=headers)
# do_get(endpoint, path, access_token)
# do an HTTP GET request and return JSON
def do_get(endpoint, path, access_token):
global dsversion_min, dsversion_max, json_acceptformat, json_acceptformat
min_ds = dsversion_min; max_ds = dsversion_max; content_acceptformat = json_acceptformat; acceptformat = json_acceptformat
headers = {"Content-Type": content_acceptformat,
"DataServiceVersion": min_ds,
"MaxDataServiceVersion": max_ds,
"Accept": acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
body = ''
response = requests.get(endpoint, headers=headers, allow_redirects=False)
# AMS response to the first call can be a redirect,
# so we handle it here to make it transparent for the caller...
if (response.status_code == 301):
redirected_url = ''.join([response.headers['location'], path])
response = requests.get(redirected_url, data=body, headers=headers)
return response
# do_put(endpoint, path, body, access_token, format="json", ds_min_version="3.0;NetFx")
# do an HTTP PUT request and return JSON
def do_put(endpoint, path, body, access_token, format="json", ds_min_version="3.0;NetFx"):
global dsversion_min, dsversion_max, json_acceptformat, json_acceptformat
min_ds = dsversion_min; max_ds = dsversion_max; content_acceptformat = json_acceptformat; acceptformat = json_acceptformat
if (format == "json_only"):
min_ds = ds_min_version
content_acceptformat = json_only_acceptformat
headers = {"Content-Type": content_acceptformat,
"DataServiceVersion": min_ds,
"MaxDataServiceVersion": max_ds,
"Accept": acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
response = requests.put(endpoint, data=body, headers=headers, allow_redirects=False)
# AMS response to the first call can be a redirect,
# so we handle it here to make it transparent for the caller...
if (response.status_code == 301):
redirected_url = ''.join([response.headers['location'], path])
response = requests.put(redirected_url, data=body, headers=headers)
return response
# do_post(endpoint, body, access_token, format="json", ds_min_version="3.0;NetFx")
# do an HTTP POST request and return JSON
def do_post(endpoint, path, body, access_token, format="json", ds_min_version="3.0;NetFx"):
global dsversion_min, dsversion_max, json_acceptformat, json_acceptformat
min_ds = dsversion_min; max_ds = dsversion_max; content_acceptformat = json_acceptformat; acceptformat = json_acceptformat
if (format == "json_only"):
min_ds = ds_min_version
content_acceptformat = json_only_acceptformat
if (format == "xml"):
content_acceptformat = xml_acceptformat
acceptformat = xml_acceptformat + ",application/xml"
headers = {"Content-Type": content_acceptformat,
"DataServiceVersion": min_ds,
"MaxDataServiceVersion": max_ds,
"Accept": acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
response = requests.post(endpoint, data=body, headers=headers, allow_redirects=False)
# AMS response to the first call can be a redirect,
# so we handle it here to make it transparent for the caller...
if (response.status_code == 301):
redirected_url = ''.join([response.headers['location'], path])
response = requests.post(redirected_url, data=body, headers=headers)
return response
# do_patch(endpoint, path, body, access_token)
# do an HTTP PATCH request and return JSON
def do_patch(endpoint, path, body, access_token):
global dsversion_min, dsversion_max, json_acceptformat, json_acceptformat
min_ds = dsversion_min; max_ds = dsversion_max; content_acceptformat = json_acceptformat; acceptformat = json_acceptformat
headers = {"Content-Type": content_acceptformat,
"DataServiceVersion": min_ds,
"MaxDataServiceVersion": max_ds,
"Accept": acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
response = requests.patch(endpoint, data=body, headers=headers, allow_redirects=False)
# AMS response to the first call can be a redirect,
# so we handle it here to make it transparent for the caller...
if (response.status_code == 301):
redirected_url = ''.join([response.headers['location'], path])
response = requests.patch(redirected_url, data=body, headers=headers)
return response
# do_delete(endpoint, access_token)
# do an HTTP DELETE request and return JSON
def do_delete(endpoint, path, access_token):
global dsversion_min, dsversion_max, json_acceptformat, json_acceptformat
min_ds = dsversion_min; max_ds = dsversion_max; content_acceptformat = json_acceptformat; acceptformat = json_acceptformat
headers = {"DataServiceVersion": min_ds,
"MaxDataServiceVersion": max_ds,
"Accept": acceptformat,
"Accept-Charset" : charset,
"Authorization": 'Bearer ' + access_token,
"x-ms-version" : xmsversion}
response = requests.delete(endpoint, headers=headers, allow_redirects=False)
# AMS response to the first call can be a redirect,
# so we handle it here to make it transparent for the caller...
if (response.status_code == 301):
redirected_url = ''.join([response.headers['location'], path])
response = requests.delete(redirected_url, headers=headers)
return response
# do_sto_put(endpoint, body, access_token)
# do an HTTP PUT request to the azure storage api and return JSON
def do_sto_put(endpoint, body, content_length, access_token):
global dsversion_min, dsversion_max, json_acceptformat, json_acceptformat
min_ds = dsversion_min; max_ds = dsversion_max; content_acceptformat = json_acceptformat; acceptformat = json_acceptformat
headers = {"Accept": acceptformat,
"Accept-Charset" : charset,
"x-ms-blob-type" : "BlockBlob",
"x-ms-meta-m1": "v1",
"x-ms-meta-m2": "v2",
"x-ms-version" : "2015-02-21",
"Content-Length" : str(content_length)}
return requests.put(endpoint, data=body, headers=headers)
# do_get_url(endpoint, access_token)
# do an HTTP GET request and return JSON
def do_get_url(endpoint, access_token, flag=True):
global dsversion_min, dsversion_max, json_acceptformat, json_acceptformat
min_ds = dsversion_min; max_ds = dsversion_max; content_acceptformat = json_acceptformat; acceptformat = json_acceptformat
headers = {"Content-Type": content_acceptformat,
"DataServiceVersion": min_ds,
"MaxDataServiceVersion": max_ds,
"Accept": acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
body = ''
response = requests.get(endpoint, headers=headers, allow_redirects=flag)
if(flag):
if (response.status_code == 301):
response = requests.get(response.headers['location'], data=body, headers=headers)
return response
| 48.078788 | 152 | 0.738686 | 1,001 | 7,933 | 5.652348 | 0.125874 | 0.093319 | 0.11877 | 0.038883 | 0.86444 | 0.832803 | 0.795334 | 0.756981 | 0.756981 | 0.72128 | 0 | 0.006284 | 0.157444 | 7,933 | 164 | 153 | 48.371951 | 0.840215 | 0.192991 | 0 | 0.675 | 0 | 0 | 0.137427 | 0.024973 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.025 | 0 | 0.158333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
44853d5849ef798c9af01590c3a5bb2d352e6c4b | 199 | py | Python | cartex/__init__.py | tochikuji/Cartoon-Texture-Decomposition | 9ac7bbafda426c653f2c3e66c73f65d927542154 | [
"Apache-2.0"
] | 7 | 2020-05-21T08:24:07.000Z | 2022-02-27T16:47:20.000Z | cartex/__init__.py | tochikuji/Cartoon-Texture-Decomposition | 9ac7bbafda426c653f2c3e66c73f65d927542154 | [
"Apache-2.0"
] | null | null | null | cartex/__init__.py | tochikuji/Cartoon-Texture-Decomposition | 9ac7bbafda426c653f2c3e66c73f65d927542154 | [
"Apache-2.0"
] | null | null | null | from cartex.tools import expect_valid_float_image
from cartex.iterative_lpf import iterativeLPF
from cartex.ltv import LTV, channelwiseLTV
from cartex.decomposition import CartoonTextureDecomposition | 49.75 | 60 | 0.894472 | 25 | 199 | 6.96 | 0.6 | 0.229885 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.080402 | 199 | 4 | 60 | 49.75 | 0.95082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
9240896e2daac5e918da84944e0553ad3aacff53 | 19 | py | Python | pyTGA/__init__.py | Lightslayer/pyTGA | 7882e84dc6020119f3a1842e162f641061ff8248 | [
"MIT"
] | 17 | 2016-09-19T10:08:52.000Z | 2022-02-28T09:24:35.000Z | pyTGA/__init__.py | Lightslayer/pyTGA | 7882e84dc6020119f3a1842e162f641061ff8248 | [
"MIT"
] | 2 | 2019-01-19T16:45:52.000Z | 2019-11-04T10:53:53.000Z | pyTGA/__init__.py | Lightslayer/pyTGA | 7882e84dc6020119f3a1842e162f641061ff8248 | [
"MIT"
] | 5 | 2017-07-22T19:12:14.000Z | 2019-08-17T07:25:29.000Z | from . tga import * | 19 | 19 | 0.684211 | 3 | 19 | 4.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.210526 | 19 | 1 | 19 | 19 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
925e80cbebca9ba89ee8f1a2f445e6ca760a0097 | 997 | py | Python | test/test_get_image_info_result.py | Cloudmersive/Cloudmersive.APIClient.Python.Convert | dba2fe7257229ebdacd266531b3724552c651009 | [
"Apache-2.0"
] | 3 | 2018-07-25T23:04:34.000Z | 2021-08-10T16:43:10.000Z | test/test_get_image_info_result.py | Cloudmersive/Cloudmersive.APIClient.Python.Convert | dba2fe7257229ebdacd266531b3724552c651009 | [
"Apache-2.0"
] | 3 | 2020-11-23T10:46:48.000Z | 2021-12-30T14:09:34.000Z | test/test_get_image_info_result.py | Cloudmersive/Cloudmersive.APIClient.Python.Convert | dba2fe7257229ebdacd266531b3724552c651009 | [
"Apache-2.0"
] | 2 | 2020-01-07T09:48:01.000Z | 2020-11-23T10:47:00.000Z | # coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_convert_api_client
from cloudmersive_convert_api_client.models.get_image_info_result import GetImageInfoResult # noqa: E501
from cloudmersive_convert_api_client.rest import ApiException
class TestGetImageInfoResult(unittest.TestCase):
"""GetImageInfoResult unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetImageInfoResult(self):
"""Test GetImageInfoResult"""
# FIXME: construct object with mandatory attributes with example values
# model = cloudmersive_convert_api_client.models.get_image_info_result.GetImageInfoResult() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.317073 | 113 | 0.737212 | 113 | 997 | 6.230089 | 0.575221 | 0.071023 | 0.125 | 0.159091 | 0.198864 | 0.147727 | 0.147727 | 0.147727 | 0.147727 | 0 | 0 | 0.013631 | 0.190572 | 997 | 40 | 114 | 24.925 | 0.858736 | 0.442327 | 0 | 0.214286 | 1 | 0 | 0.015564 | 0 | 0 | 0 | 0 | 0.025 | 0 | 1 | 0.214286 | false | 0.214286 | 0.357143 | 0 | 0.642857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
2bcef91853a1d15439679525195ad22cb6470db4 | 41 | py | Python | generators/tests/templates/tests/test_lib.py | The-Politico/generator-politico-python-package | b5882eed9dfc8c1025a6ac25212e325246961a48 | [
"MIT"
] | 5 | 2018-01-30T17:36:35.000Z | 2021-02-28T12:08:29.000Z | generators/tests/templates/tests/test_lib.py | The-Politico/generator-politico-python-package | b5882eed9dfc8c1025a6ac25212e325246961a48 | [
"MIT"
] | 1 | 2018-01-05T19:33:47.000Z | 2018-01-05T19:33:47.000Z | generators/tests/templates/tests/test_lib.py | The-Politico/generator-politico-python-package | b5882eed9dfc8c1025a6ac25212e325246961a48 | [
"MIT"
] | null | null | null | def tests_lib():
assert True is True
| 13.666667 | 23 | 0.682927 | 7 | 41 | 3.857143 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.243902 | 41 | 2 | 24 | 20.5 | 0.870968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0.5 | true | 0 | 0 | 0 | 0.5 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
2beab61857511c1e9ee2a2b7a4608d0eeed543ec | 26,857 | py | Python | tools/graph_builder.py | teploff/fractal-surface | e2b8a7cd30f710e3e2522c8f4eb0240832bea012 | [
"MIT"
] | null | null | null | tools/graph_builder.py | teploff/fractal-surface | e2b8a7cd30f710e3e2522c8f4eb0240832bea012 | [
"MIT"
] | null | null | null | tools/graph_builder.py | teploff/fractal-surface | e2b8a7cd30f710e3e2522c8f4eb0240832bea012 | [
"MIT"
] | null | null | null | import pickle
from typing import List
import matplotlib.pyplot as plt
def build_classic_one_phase(iter_count: int, depth: int):
with open(f'../metrics/datasets/classic/iterations_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
iterations = pickle.load(fp)
with open(f'../metrics/datasets/classic/length_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
line_length = pickle.load(fp)
with open(f'../metrics/datasets/classic/square_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
square = pickle.load(fp)
with open(f'../metrics/datasets/classic/volume_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
volume = pickle.load(fp)
with open(f'../metrics/datasets/classic/s_l_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
s_l = pickle.load(fp)
with open(f'../metrics/datasets/classic/v_s_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
v_s = pickle.load(fp)
with open(f'../metrics/datasets/classic/v_l_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
v_l = pickle.load(fp)
with open(f'../metrics/datasets/classic/v_v_base_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
v_v_base = pickle.load(fp)
with open(f'../metrics/datasets/classic/fractal_span_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
fractal_span = pickle.load(fp)
# # TODO: разкомментировать по необходиомости
# # Производим интерполяцию по найденным метрикам
# y_length = make_interpolation(iterations, line_length)
# y_square = make_interpolation(iterations, square)
# y_volume = make_interpolation(iterations, volume)
# Строим графики для найденных и апроксимируемыъ метрик.
fig1, ax1 = plt.subplots()
ax1.plot(iterations, line_length, 'o', label=r'$a$', c='black', linewidth=1)
fig2, ax2 = plt.subplots()
ax2.plot(iterations, square, 'X', label=r'$a$', c='black', linewidth=1)
fig3, ax3 = plt.subplots()
ax3.plot(iterations, volume, '*', label=r'$a$', c='black', linewidth=1)
fig4, ax4 = plt.subplots()
ax4.plot(iterations, s_l, '*', label=r'$a$', c='black', linewidth=1)
fig5, ax5 = plt.subplots()
ax5.plot(iterations, v_s, '*', label=r'$a$', c='black', linewidth=1)
fig6, ax6 = plt.subplots()
ax6.plot(iterations, v_l, '*', label=r'$a$', c='black', linewidth=1)
fig7, ax7 = plt.subplots()
ax7.plot(iterations, v_v_base, '*', label=r'$a$', c='black', linewidth=1)
fig8, ax8 = plt.subplots()
ax8.plot(iterations, fractal_span, '*', label=r'$a$', c='black', linewidth=1)
ax1.grid(True)
ax2.grid(True)
ax3.grid(True)
ax4.grid(True)
ax5.grid(True)
ax6.grid(True)
ax7.grid(True)
ax8.grid(True)
ax1.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax1.set(xlabel='Число циклов роста, ед.', ylabel='Длина фрактальной линии, ед.')
ax2.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax2.set(xlabel='Число циклов роста, ед.', ylabel='Площадь фрактала, ед.')
ax3.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax3.set(xlabel='Число циклов роста, ед.', ylabel='Объем фрактала, ед.')
ax4.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax4.set(xlabel='Число циклов роста, ед.', ylabel='Отношение S/L, ед.')
ax5.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax5.set(xlabel='Число циклов роста, ед.', ylabel='Отношение V/S, ед.')
ax6.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax6.set(xlabel='Число циклов роста, ед.', ylabel='Отношение V/L, ед.')
ax7.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax7.set(xlabel='Число циклов роста, ед.', ylabel='Отношение 4*V1/V0, ед.')
ax8.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax8.set(xlabel='Число циклов роста, ед.', ylabel='Размах фрактала, ед.')
fig1.savefig(f'../metrics/graphics/classic/length.png')
fig2.savefig(f'../metrics/graphics/classic/square.png')
fig3.savefig(f'../metrics/graphics/classic/value.png')
fig4.savefig(f'../metrics/graphics/classic/s_l.png')
fig5.savefig(f'../metrics/graphics/classic/v_s.png')
fig6.savefig(f'../metrics/graphics/classic/v_l.png')
fig7.savefig(f'../metrics/graphics/classic/4v1_v0.png')
fig8.savefig(f'../metrics/graphics/classic/fractal_span.png')
plt.show()
def build_one_phase(iter_count: int, depth: int):
with open(f'../metrics/datasets/one_phase/iterations_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
iterations = pickle.load(fp)
with open(f'../metrics/datasets/one_phase/length_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
line_length = pickle.load(fp)
with open(f'../metrics/datasets/one_phase/square_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
square = pickle.load(fp)
with open(f'../metrics/datasets/one_phase/volume_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
volume = pickle.load(fp)
with open(f'../metrics/datasets/one_phase/s_l_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
s_l = pickle.load(fp)
with open(f'../metrics/datasets/one_phase/v_s_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
v_s = pickle.load(fp)
with open(f'../metrics/datasets/one_phase/v_l_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
v_l = pickle.load(fp)
with open(f'../metrics/datasets/one_phase/v_v_base_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
v_v_base = pickle.load(fp)
with open(f'../metrics/datasets/one_phase/fractal_span_iter_count_{iter_count}_depth_{depth}.txt', 'rb') as fp:
fractal_span = pickle.load(fp)
# Строим графики для найденных и апроксимируемыъ метрик.
fig1, ax1 = plt.subplots()
ax1.plot(iterations, line_length, 'o', label=r'$a$', c='black', linewidth=1)
fig2, ax2 = plt.subplots()
ax2.plot(iterations, square, 'X', label=r'$a$', c='black', linewidth=1)
fig3, ax3 = plt.subplots()
ax3.plot(iterations, volume, '*', label=r'$a$', c='black', linewidth=1)
fig4, ax4 = plt.subplots()
ax4.plot(iterations, s_l, '*', label=r'$a$', c='black', linewidth=1)
fig5, ax5 = plt.subplots()
ax5.plot(iterations, v_s, '*', label=r'$a$', c='black', linewidth=1)
fig6, ax6 = plt.subplots()
ax6.plot(iterations, v_l, '*', label=r'$a$', c='black', linewidth=1)
fig7, ax7 = plt.subplots()
ax7.plot(iterations, v_v_base, '*', label=r'$a$', c='black', linewidth=1)
fig8, ax8 = plt.subplots()
ax8.plot(iterations, fractal_span, '*', label=r'$a$', c='black', linewidth=1)
ax1.grid(True)
ax2.grid(True)
ax3.grid(True)
ax4.grid(True)
ax5.grid(True)
ax6.grid(True)
ax7.grid(True)
ax8.grid(True)
ax1.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax1.set(xlabel='Число циклов роста, ед.', ylabel='Длина фрактальной линии, ед.')
ax2.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax2.set(xlabel='Число циклов роста, ед.', ylabel='Площадь фрактала, ед.')
ax3.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax3.set(xlabel='Число циклов роста, ед.', ylabel='Объем фрактала, ед.')
ax4.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax4.set(xlabel='Число циклов роста, ед.', ylabel='Отношение S/L, ед.')
ax5.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax5.set(xlabel='Число циклов роста, ед.', ylabel='Отношение V/S, ед.')
ax6.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax6.set(xlabel='Число циклов роста, ед.', ylabel='Отношение V/L, ед.')
ax7.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax7.set(xlabel='Число циклов роста, ед.', ylabel='Отношение 4*V1/V0, ед.')
ax8.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax8.set(xlabel='Число циклов роста, ед.', ylabel='Размах фрактала, ед.')
fig1.savefig(f'../metrics/graphics/one_phase/length.png')
fig2.savefig(f'../metrics/graphics/one_phase/square.png')
fig3.savefig(f'../metrics/graphics/one_phase/value.png')
fig4.savefig(f'../metrics/graphics/one_phase/s_l.png')
fig5.savefig(f'../metrics/graphics/one_phase/v_s.png')
fig6.savefig(f'../metrics/graphics/one_phase/v_l.png')
fig7.savefig(f'../metrics/graphics/one_phase/4v1_v0.png')
fig8.savefig(f'../metrics/graphics/one_phase/fractal_span.png')
plt.show()
def build_several_phases(iter_count: int, depth: int, deltas: List[int]):
    """Plot metric-vs-growth-cycle graphs comparing three runs with different phase deltas.

    Loads the pickled metric series previously written under
    ``../metrics/datasets/several_phases/`` for each delta in ``deltas``,
    draws one figure per metric (all three runs overlaid), saves each
    figure as a PNG under ``../metrics/graphics/several_phases/`` and
    finally shows the figures.

    Args:
        iter_count: number of growth cycles used when the datasets were generated.
        depth: fractal depth used when the datasets were generated.
        deltas: the three delta values whose datasets are compared
            (only the first three are plotted; legend labels are derived
            from these values).
    """
    dataset_dir = '../metrics/datasets/several_phases'
    graphics_dir = '../metrics/graphics/several_phases'
    # every metric series is stored in its own pickled file, one per run
    metrics = ('iterations', 'length', 'square', 'volume', 's_l',
               'v_s', 'v_l', 'v_v_base', 'fractal_span')

    def _load(metric: str, delta: int):
        # filename pattern matches the writer side of the pipeline
        path = (f'{dataset_dir}/{metric}_iter_count_{iter_count}'
                f'_depth_{depth}_delta_{delta}.txt')
        with open(path, 'rb') as fp:
            return pickle.load(fp)

    # one dict of metric -> series per delta run
    runs = [{metric: _load(metric, delta) for metric in metrics} for delta in deltas]

    # Build the graphs for the computed metrics:
    # (metric key, marker style, y-axis label, output file stem)
    figures = [
        ('length', 'o', 'Длина фрактальной линии, ед.', 'length'),
        ('square', 'X', 'Площадь фрактала, ед.', 'square'),
        ('volume', '*', 'Объем фрактала, ед.', 'value'),
        ('s_l', '*', 'Отношение S/L, ед.', 's_l'),
        ('v_s', '*', 'Отношение V/S, ед.', 'v_s'),
        ('v_l', '*', 'Отношение V/L, ед.', 'v_l'),
        ('v_v_base', '*', 'Отношение 4*V1/V0, ед.', '4v1_v0'),
        ('fractal_span', '*', 'Размах фрактала, ед.', 'fractal_span'),
    ]
    colors = ('black', 'red', 'blue')
    for metric, marker, ylabel, stem in figures:
        fig, ax = plt.subplots()
        for run, delta, color in zip(runs, deltas, colors):
            # legend label is the delta value itself (was hard-coded before)
            ax.plot(run['iterations'], run[metric], marker,
                    label=f'${delta}$', c=color, linewidth=1)
        ax.grid(True)
        ax.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
        ax.set(xlabel='Число циклов роста, ед.', ylabel=ylabel)
        fig.savefig(f'{graphics_dir}/{stem}.png')
    plt.show()
def build_stochastic(iter_count: int, depth: int, l_rndms: List[float]):
    """Plot metric-vs-growth-cycle graphs comparing three stochastic runs.

    Loads the pickled metric series previously written under
    ``../metrics/datasets/stochasticity/`` for each randomness factor in
    ``l_rndms``, draws one figure per metric (all three runs overlaid),
    saves each figure as a PNG under ``../metrics/graphics/stochasticity/``
    and finally shows the figures.

    Args:
        iter_count: number of growth cycles used when the datasets were generated.
        depth: fractal depth used when the datasets were generated.
        l_rndms: the three stochastic length factors (``l_rnd``) whose
            datasets are compared (only the first three are plotted; legend
            labels are derived from these values).
    """
    dataset_dir = '../metrics/datasets/stochasticity'
    graphics_dir = '../metrics/graphics/stochasticity'
    # every metric series is stored in its own pickled file, one per run
    metrics = ('iterations', 'length', 'square', 'volume', 's_l',
               'v_s', 'v_l', 'v_v_base', 'fractal_span')

    def _load(metric: str, l_rnd: float):
        # filename pattern matches the writer side of the pipeline
        path = (f'{dataset_dir}/{metric}_iter_count_{iter_count}'
                f'_depth_{depth}_l_rnd_{l_rnd}.txt')
        with open(path, 'rb') as fp:
            return pickle.load(fp)

    # one dict of metric -> series per stochastic run
    runs = [{metric: _load(metric, l_rnd) for metric in metrics} for l_rnd in l_rndms]

    # Build the graphs for the computed metrics:
    # (metric key, marker style, y-axis label, output file stem)
    figures = [
        ('length', 'o', 'Длина фрактальной линии, ед.', 'length'),
        ('square', 'X', 'Площадь фрактала, ед.', 'square'),
        ('volume', '*', 'Объем фрактала, ед.', 'value'),
        ('s_l', '*', 'Отношение S/L, ед.', 's_l'),
        ('v_s', '*', 'Отношение V/S, ед.', 'v_s'),
        ('v_l', '*', 'Отношение V/L, ед.', 'v_l'),
        ('v_v_base', '*', 'Отношение 4*V1/V0, ед.', '4v1_v0'),
        ('fractal_span', '*', 'Размах фрактала, ед.', 'fractal_span'),
    ]
    colors = ('black', 'red', 'blue')
    for metric, marker, ylabel, stem in figures:
        fig, ax = plt.subplots()
        for run, l_rnd, color in zip(runs, l_rndms, colors):
            # legend label is the randomness factor itself (was hard-coded before)
            ax.plot(run['iterations'], run[metric], marker,
                    label=f'${l_rnd}$', c=color, linewidth=1)
        ax.grid(True)
        ax.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1)
        ax.set(xlabel='Число циклов роста, ед.', ylabel=ylabel)
        fig.savefig(f'{graphics_dir}/{stem}.png')
    plt.show()
# Entry point: rebuild every figure set from the previously pickled metric
# datasets; first two arguments are (growth cycles, fractal depth).
if __name__ == '__main__':
    build_classic_one_phase(1000, 7)
    build_one_phase(1000, 7)
    build_several_phases(1000, 7, [1, 200, 400])  # phase delta sweep -- TODO confirm units
    build_stochastic(1000, 7, [0.6, 0.75, 0.9])  # stochastic l_rnd factor sweep
| 49.735185 | 138 | 0.684179 | 4,126 | 26,857 | 4.244547 | 0.039748 | 0.076058 | 0.037001 | 0.06578 | 0.977274 | 0.964883 | 0.954834 | 0.920174 | 0.887055 | 0.884257 | 0 | 0.032681 | 0.136389 | 26,857 | 539 | 139 | 49.827458 | 0.722385 | 0.017239 | 0 | 0.572917 | 0 | 0.140625 | 0.40423 | 0.310223 | 0 | 0 | 0 | 0.001855 | 0 | 1 | 0.010417 | false | 0 | 0.007813 | 0 | 0.018229 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a637e891d8c6a5f525f598e654ebe48a31950462 | 15,117 | py | Python | pyioflash/postprocess/sources/energy.py | Balaras-Group/pyIOFlash | d75be74165a1b0114ed3f6186800ed33e06c3d58 | [
"MIT"
] | null | null | null | pyioflash/postprocess/sources/energy.py | Balaras-Group/pyIOFlash | d75be74165a1b0114ed3f6186800ed33e06c3d58 | [
"MIT"
] | null | null | null | pyioflash/postprocess/sources/energy.py | Balaras-Group/pyIOFlash | d75be74165a1b0114ed3f6186800ed33e06c3d58 | [
"MIT"
] | null | null | null | """
This module defines the energy calculation methods of the post-processing
subpackage of the pyioflash lbrary; part of the 'source' set of routines.
This module currently defines the following methods:
thermal -- -- -- -> thermal energy
kinetic -- -- -- -> total instantanious kinetic energy
kinetic_mean -- -> mean (time averaged) kinetic energy
kinetic_turbulant -> turbulant instantanious kinetic energy
Todo:
"""
from typing import List, Dict, Optional, Union, TYPE_CHECKING
from pyioflash.postprocess.utility import _interpolate_ftc, make_sourceable, make_stackable, Output
from pyioflash.postprocess.sources import fields
from pyioflash.postprocess.elements import integral
from pyioflash.postprocess.analyses import series
if TYPE_CHECKING:
from pyioflash.simulation.data import SimulationData
from pyioflash.postprocess.utility import Type_Step, Type_Field, Type_Index, Type_Output
# declare the public api of the module
def __dir__() -> List[str]:
    """Return the names that make up this module's public api."""
    public = ["thermal", "kinetic", "kinetic_mean", "kinetic_turbulant"]
    return public
def thermal(data: 'SimulationData', step: 'Type_Step' = -1, *,
            wrapped: bool = False, mapping: Optional[Dict[str, str]] = None,
            scale: Optional[float] = None, index: Optional['Type_Index'] = None,
            withguard: bool = False, keepdims: bool = True) -> 'Type_Output':
    """
    Provides a method for calculation of the thermal energy by
    consuming a SimulationData object; must have a 'temp' attribute in the
    SimulationData.fields object.

    Attributes:
        data: object containing relevant flash simulation output
        step: time-like specification for which to process data, the key (optional)
        wrapped: whether to wrap context around result of sourcing (optional)
        mapping: if wrapped, how to map context to options of the next operation (optional)
        scale: used to convert returned quantity to dimensional units (optional)
        index: used for custom slicing operation; should be (blks, k, j, i) (optional)
        withguard: retain guard cell data for plotting and other actions (optional)
        keepdims: retain unused dimensions for broadcasting, else drop them (optional)

    Note:
        The thermal energy is computed according to the formula

        E(t)~ijk~ = T(t)~ijk~

        *where t = step, step is float*
        *      t = times[step], step is int*
        *    ijk = all cells*

        The returned quantity is on the cell centered grid.

        This function does not generate any dynamic context; thus even if
        wrapping is desired and specified, the mapping attribute is ignored.

    Todo:

    """
    # convert a time-like float key into its integer index if necessary
    if isinstance(step, float):
        try:
            step, = data.utility.indices(step)
        except ValueError as error:
            print(error)
            print('Could not find provided step in simulation keys!')

    # simulation dimensionality (2 or 3) and guard-cell count per block
    dimension = data.geometry.grd_dim
    guards = data.geometry.blk_guards

    # default slicing: keep every cell; drop the z axis for 2d data unless
    # keepdims is requested (guarded data keeps a centered z index instead of 0)
    if index is None:
        i_all = slice(None)
        i_zax = 0 if not withguard else int(guards / 2)
        index = (i_all, ) * 4 if (keepdims or dimension == 3) else (i_all, i_zax, i_all, i_all)

    # guarded fields are stored under a leading-underscore name
    name = '_temp' if withguard else 'temp'

    # thermal energy is temperature in nondimensional units
    energy = data.fields[name][step][0]

    # apply a dimensional scale if provided
    if scale is not None:
        energy = energy * scale

    # index results as desired
    energy = energy[index]

    # wrap result if desired (no context to provide; mapping is unused)
    return Output(energy) if wrapped else energy
def kinetic(data: 'SimulationData', step: 'Type_Step' = -1, *,
            wrapped: bool = False, mapping: Optional[Dict[str, str]] = None,
            scale: Optional[float] = None, index: Optional['Type_Index'] = None,
            withguard: bool = False, keepdims: bool = True) -> 'Type_Output':
    """
    Provides a method for calculation of the total kinetic energy by
    consuming a SimulationData object; must have 'fcx2', 'fcy2' ('fcz2' if 3d)
    attributes in the SimulationData.fields object.

    Attributes:
        data: object containing relevant flash simulation output
        step: time-like specification for which to process data, the key (optional)
        wrapped: whether to wrap context around result of sourcing (optional)
        mapping: if wrapped, how to map context to options of the next operation (optional)
        scale: used to convert returned quantity to dimensional units (optional)
        index: used for custom slicing operation; should be (blks, k, j, i) (optional)
        withguard: retain guard cell data for plotting and other actions (optional)
        keepdims: retain unused dimensions for broadcasting, else drop them (optional)

    Note:
        The total kinetic energy is computed according to the formula

        E(t)~ijk~ = u(t)~ijk~^2^ + v(t)~ijk~^2^ + w(t)~ijk~^2^

        *where t = step, step is float*
        *      t = times[step], step is int*
        *    ijk = all cells*

        where all terms are interpolated to cell centers.

        This function does not generate any dynamic context; thus even if
        wrapping is desired and specified, the mapping attribute is ignored.

    Todo:

    """
    # convert a time-like float key into its integer index if necessary
    if isinstance(step, float):
        try:
            step, = data.utility.indices(step)
        except ValueError as error:
            print(error)
            print('Could not find provided step in simulation keys!')

    # simulation dimensionality (2 or 3) and guard-cell count per block
    dimension = data.geometry.grd_dim
    guards = data.geometry.blk_guards

    # default slicing: keep every cell; drop the z axis for 2d data unless
    # keepdims is requested (guarded data keeps a centered z index instead of 0)
    if index is None:
        i_all = slice(None)
        i_zax = 0 if not withguard else int(guards / 2)
        index = (i_all, ) * 4 if (keepdims or dimension == 3) else (i_all, i_zax, i_all, i_all)

    # sum the squares of the face-centered velocity components, each
    # interpolated to cell centers first
    energy = _interpolate_ftc(data.fields['_fcx2'][step][0], 0, guards, dimension, withguard=withguard)**2
    energy = _interpolate_ftc(data.fields['_fcy2'][step][0], 1, guards, dimension, withguard=withguard)**2 + energy
    if dimension == 3:
        energy = _interpolate_ftc(data.fields['_fcz2'][step][0], 2, guards, dimension,
                                  withguard=withguard)**2 + energy

    # apply a dimensional scale if provided
    if scale is not None:
        energy = energy * scale

    # index results as desired
    energy = energy[index]

    # wrap result if desired (no context to provide; mapping is unused)
    return Output(energy) if wrapped else energy
def kinetic_mean(data: 'SimulationData', steps: Optional['Type_Index'] = slice(None), *,
                 start: Optional['Type_Step'] = None, stop: Optional['Type_Step'] = None, skip: Optional[int] = None,
                 wrapped: bool = False, mapping: Optional[Dict[str, str]] = None,
                 scale: Optional[float] = None, index: Optional['Type_Index'] = None,
                 withguard: bool = False, keepdims: bool = True) -> 'Type_Output':
    """
    Provides a method for calculation of the mean or time-averaged kinetic energy by
    consuming a SimulationData object and a time interval specification;
    must have 'fcx2', 'fcy2' ('fcz2' if 3d) attributes in the SimulationData.fields object.

    Attributes:
        data: object containing relevant flash simulation output
        steps: iterable time-like specification for which to process data, the keys (optional)
        start: used to determine the starting time-like specification, start key (optional)
        stop: used to determine the ending time-like specification, stop key (optional)
        skip: used to determine the sampling interval for the specification (optional)
        wrapped: whether to wrap context around result of sourcing (optional)
        mapping: if wrapped, how to map context to options of the next operation (optional)
        scale: used to convert returned quantity to dimensional units (optional)
        index: used for custom slicing operation; should be (blks, k, j, i) (optional)
        withguard: retain guard cell data for plotting and other actions (optional)
        keepdims: retain unused dimensions for broadcasting, else drop them (optional)

    Note:
        The mean kinetic energy is computed according to the formula

        E(t)~ijk~ = $\\sum_{$\\tau$=t~0~}^{t} (u($\\tau$)~ijk~^2^ + v($\\tau)~ijk~^2^ + w($tau$)~ijk~^2^) / N

        *where all terms are interpolated to cell centers*

        This function does not generate any dynamic context; thus even if
        wrapping is desired and specified, the mapping attribute is ignored.

    Todo:

    """
    # simulation dimensionality (2 or 3) and guard-cell count per block
    dimension = data.geometry.grd_dim
    guards = data.geometry.blk_guards

    # default slicing: keep every cell; drop the z axis for 2d data unless
    # keepdims is requested (guarded data keeps a centered z index instead of 0)
    if index is None:
        i_all = slice(None)
        i_zax = 0 if not withguard else int(guards / 2)
        index = (i_all, ) * 4 if (keepdims or dimension == 3) else (i_all, i_zax, i_all, i_all)

    # build the step specification from start/stop/skip if provided
    # NOTE(review): a falsy start of 0 does not trigger this branch -- confirm intended
    if start or stop:
        steps = slice(start, stop, skip)
    times = data.utility.times(steps)
    steps = data.utility.indices(steps)

    # use a time-series analysis (instantaneous kinetic energy integrated
    # over the sampled times) to retrieve the mean kinetic energy
    source = make_sourceable(source=kinetic, args=data, method='step', options={'withguard': withguard})
    stack = make_stackable(element=integral.time, args=data, method='whole', options={'times': times})
    energy = series.simple(source=source, sourceby=steps, stack=stack)

    # apply a dimensional scale if provided
    if scale is not None:
        energy = energy * scale

    # index results as desired
    energy = energy[index]

    # wrap result if desired (no context to provide; mapping is unused)
    return Output(energy) if wrapped else energy
def kinetic_turbulant(data: 'SimulationData', step: Optional['Type_Step'] = -1, *,
                      mean: Optional['Type_Field'] = None, start: Optional['Type_Step'] = None,
                      stop: Optional['Type_Step'] = None, skip: Optional[int] = None,
                      wrapped: bool = False, mapping: Optional[Dict[str, str]] = None,
                      scale: Optional[float] = None, index: Optional['Type_Index'] = None,
                      withguard: bool = False, keepdims: bool = True) -> 'Type_Output':
    """
    Provides a method for calculation of the turbulent kinetic energy by
    consuming a SimulationData object and either a mean field or a time
    interval specification to determine the mean field; must have 'fcx2',
    'fcy2' ('fcz2' if 3d) attributes in the SimulationData.fields object.

    Attributes:
        data: object containing relevant flash simulation output
        step: time-like specification for which to process data, the key (optional)
        mean: provide mean velocity components to avoid calculating them (optional)
        start: used to determine the starting time-like specification, start key (optional)
        stop: used to determine the ending time-like specification, stop key (optional)
        skip: used to determine the sampling interval for the specification (optional)
        wrapped: whether to wrap context around result of sourcing (optional)
        mapping: if wrapped, how to map context to options of the next operation (optional)
        scale: used to convert returned quantity to dimensional units (optional)
        index: used for custom slicing operation; should be (blks, k, j, i) (optional)
        withguard: retain guard cell data for plotting and other actions (optional)
        keepdims: retain unused dimensions for broadcasting, else drop them (optional)

    Note:
        The turbulent kinetic energy is computed according to the formula

        E(t)~ijk~ = (u(t)~ijk~ - u_bar~ijk~)^2^ + ...

        *where all terms are interpolated to cell centers*

        If a mean is provided it must be a 2 or 3 component field broadcastable
        with the simulation data velocity components; specifically,
        (dims, blks, k, j, i).

        This function does not generate any dynamic context; thus even if
        wrapping is desired and specified, the mapping attribute is ignored.

    Todo:

    """
    # convert a time-like float key into its integer index if necessary
    if isinstance(step, float):
        try:
            step, = data.utility.indices(step)
        except ValueError as error:
            print(error)
            print('Could not find provided step in simulation keys!')

    # simulation dimensionality (2 or 3) and guard-cell count per block
    dimension = data.geometry.grd_dim
    guards = data.geometry.blk_guards

    # default slicing: keep every cell; drop the z axis for 2d data unless
    # keepdims is requested (guarded data keeps a centered z index instead of 0)
    if index is None:
        i_all = slice(None)
        i_zax = 0 if not withguard else int(guards / 2)
        index = (i_all, ) * 4 if (keepdims or dimension == 3) else (i_all, i_zax, i_all, i_all)

    # retrieve mean velocity components if not provided
    if mean is None:
        components = fields.velocity_mean(data, start=start, stop=stop, skip=skip, withguard=withguard)
        u_bar, v_bar = components[:2]
        if dimension == 3:
            w_bar = components[2]
    # mean is provided
    else:
        # validate the provided mean; BUGFIX: this check previously used 'and',
        # which never raised for sequences and raised a bare TypeError from
        # len() for non-sequences -- 'or' restores the intended validation
        if not hasattr(mean, '__len__') or len(mean) not in (2, 3):
            raise TypeError('Provided mean does not have three velocity components!')
        # unpack provided components
        u_bar, v_bar = mean[:2]
        if dimension == 3:
            w_bar = mean[2]

    # calculate instantaneous velocity components on cell-centers
    u_ins = _interpolate_ftc(data.fields['_fcx2'][step][0], 0, guards, dimension, withguard=withguard)
    v_ins = _interpolate_ftc(data.fields['_fcy2'][step][0], 1, guards, dimension, withguard=withguard)
    if dimension == 3:
        w_ins = _interpolate_ftc(data.fields['_fcz2'][step][0], 2, guards, dimension, withguard=withguard)

    # calculate turbulent kinetic energy from the fluctuating components
    energy = ((u_ins - u_bar)**2 + (v_ins - v_bar)**2) / 2
    if dimension == 3:
        energy = energy + ((w_ins - w_bar)**2 / 2)

    # apply a dimensional scale if provided
    if scale is not None:
        energy = energy * scale

    # index results as desired
    energy = energy[index]

    # wrap result if desired (no context to provide; mapping is unused)
    return Output(energy) if wrapped else energy
| 41.53022 | 117 | 0.65337 | 1,950 | 15,117 | 5.005641 | 0.134359 | 0.008196 | 0.017211 | 0.014753 | 0.759451 | 0.749718 | 0.744084 | 0.743366 | 0.734761 | 0.715603 | 0 | 0.007084 | 0.262287 | 15,117 | 363 | 118 | 41.644628 | 0.868185 | 0.515512 | 0 | 0.632 | 0 | 0 | 0.077136 | 0 | 0 | 0 | 0 | 0.013774 | 0 | 1 | 0.04 | false | 0 | 0.056 | 0.008 | 0.136 | 0.048 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a639540a56aa76a61d20fdf336afd10e7e24ac4f | 25 | py | Python | pydobot/__init__.py | luismesas/pyDobotMagician | 2ff8d7e1318ac9b7bc32bd33ff327d343f44927c | [
"MIT"
] | 82 | 2017-04-08T04:15:39.000Z | 2022-02-18T08:16:01.000Z | pydobot/__init__.py | luismesas/pyDobotMagician | 2ff8d7e1318ac9b7bc32bd33ff327d343f44927c | [
"MIT"
] | 30 | 2017-04-13T09:45:59.000Z | 2022-03-11T07:51:31.000Z | pydobot/__init__.py | luismesas/pyDobotMagician | 2ff8d7e1318ac9b7bc32bd33ff327d343f44927c | [
"MIT"
] | 53 | 2017-06-13T15:36:47.000Z | 2022-03-31T12:39:26.000Z | from .dobot import Dobot
| 12.5 | 24 | 0.8 | 4 | 25 | 5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16 | 25 | 1 | 25 | 25 | 0.952381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a6a0c7b8176fbfad01e7091cb6d0c95ac7af6175 | 43 | py | Python | calc.py | negi524/python_test | 882e669d14ffb012dd2b640377d457541b9e8360 | [
"MIT"
] | null | null | null | calc.py | negi524/python_test | 882e669d14ffb012dd2b640377d457541b9e8360 | [
"MIT"
] | null | null | null | calc.py | negi524/python_test | 882e669d14ffb012dd2b640377d457541b9e8360 | [
"MIT"
] | null | null | null |
def double(number):
    """Return *number* multiplied by two."""
    return number * 2
| 10.75 | 21 | 0.651163 | 6 | 43 | 4.666667 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03125 | 0.255814 | 43 | 3 | 22 | 14.333333 | 0.84375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
4711b39e3ed00cc53aad08ad86ae1d4abf91db24 | 127 | py | Python | bootstrap/lib/overwrite_print.py | Cadene/bootstrap.pytorch | e7d55b52fe8d819de7ea3da8b1027d4a3dcc9e0c | [
"BSD-3-Clause"
] | 196 | 2018-01-12T01:07:47.000Z | 2022-03-18T21:42:11.000Z | bootstrap/lib/overwrite_print.py | jbegaint/bootstrap.pytorch | 43b0be90e39fdb96018411cb5bfad6bc9d29f023 | [
"BSD-3-Clause"
] | 32 | 2019-02-24T11:08:22.000Z | 2020-07-17T14:33:02.000Z | bootstrap/lib/overwrite_print.py | jbegaint/bootstrap.pytorch | 43b0be90e39fdb96018411cb5bfad6bc9d29f023 | [
"BSD-3-Clause"
] | 30 | 2018-03-22T23:51:01.000Z | 2022-03-27T12:13:06.000Z | from .logger import Logger
# TODO: better overwritting
def print(*msg):
    # Intentionally shadows the builtin ``print`` so callers of this module's
    # ``print`` are routed through the project Logger instead of stdout.
    # NOTE(review): ``stack_displacement=2`` presumably makes the logger
    # attribute the message to the caller's frame rather than this wrapper --
    # confirm against the Logger implementation.
    Logger().log_message(*msg, stack_displacement=2)
| 18.142857 | 52 | 0.740157 | 17 | 127 | 5.411765 | 0.823529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009174 | 0.141732 | 127 | 6 | 53 | 21.166667 | 0.834862 | 0.19685 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0.333333 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
472520b70cd4e70a8f2ea258150ec062690ac29e | 25,989 | py | Python | agent_listener.py | fringsoo/pragmatics_game | 1b8ca08a043dbbd063374d9303e843aceb9fc335 | [
"MIT"
] | 4 | 2020-12-10T10:29:47.000Z | 2021-06-06T23:06:43.000Z | agent_listener.py | fringsoo/pragmatics_game | 1b8ca08a043dbbd063374d9303e843aceb9fc335 | [
"MIT"
] | 4 | 2021-06-06T13:05:37.000Z | 2021-07-02T12:15:28.000Z | agent_listener.py | fringsoo/pragmatics_game | 1b8ca08a043dbbd063374d9303e843aceb9fc335 | [
"MIT"
] | 1 | 2021-06-06T14:59:54.000Z | 2021-06-06T14:59:54.000Z | import random
import numpy as np
import scipy
import time
import json
import os
import pdb
import pickle
import pandas
from progressbar import *
from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot
from keras.models import Sequential, load_model, Model
from keras.optimizers import RMSprop, Adam, SGD
from keras import backend as K
from keras import regularizers
from keras.utils.np_utils import to_categorical
from utils import convnet_vgg, convnet_mod, convnet_ori, convnet_com
def softmax(x):
    """Numerically stable softmax over all elements of *x*.

    Subtracting the maximum leaves the result mathematically unchanged while
    preventing overflow in ``np.exp`` for large inputs.
    """
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / np.sum(exps)
def makeFunc(x):
    """Return a function that selects column *x* of a 2-D array (``arr[:, x]``).

    A factory is used so the column index is bound at creation time, which is
    safe even when the returned functions are created inside a loop.
    """
    def select_column(arr):
        return arr[:, x]
    return select_column
class BaseListenerNetwork(object):
    """Common scaffolding for listener agents: hyper-parameters plus
    save/load helpers for model weights, optimizer state, and an in-memory
    weight snapshot.

    Subclasses must implement ``initialize_model`` (sets
    ``self.listener_model``) and ``build_train_fn`` (sets ``self.opt`` and
    ``self.train_fn``); both are invoked from this constructor.
    """

    def __init__(self, modelname, optfilename, lr, entropy_coefficient, config_dict):
        self.modelname = modelname
        self.optfilename = optfilename
        self.lr = lr
        self.entropy_coefficient = entropy_coefficient
        assert config_dict, "config_dict does not exist"
        self.config = config_dict
        self.initialize_model()
        self.build_train_fn()

    def rebuild_train_fn(self, entropy_coefficient=None, lr=None):
        # Optionally override the hyper-parameters, then recompile.
        if entropy_coefficient:
            self.entropy_coefficient = entropy_coefficient
        if lr:
            self.lr = lr
        self.build_train_fn()

    def save(self):
        # Full model (architecture + weights) to disk.
        self.listener_model.save(self.modelname)

    def load(self):
        self.listener_model = load_model(self.modelname)

    def save_weights(self):
        # Weights only; architecture must already match on reload.
        self.listener_model.save_weights(self.modelname)

    def load_weights(self):
        self.listener_model.load_weights(self.modelname)

    def save_opt(self):
        # Persist optimizer slot variables so training can resume seamlessly.
        values = K.batch_get_value(self.opt.weights)
        with open(self.optfilename, 'wb') as fh:
            pickle.dump(values, fh)

    def load_opt(self):
        with open(self.optfilename, 'rb') as fh:
            self.opt.set_weights(pickle.load(fh))

    def save_memory(self):
        # Snapshot the current weights in RAM (not on disk).
        self.memory_model_weights = self.listener_model.get_weights()

    def load_memory(self):
        self.listener_model.set_weights(self.memory_model_weights)
class PaperListenerNetwork(BaseListenerNetwork):
    """Feed-forward listener for the referential game.

    The fixed-length speaker message is projected by a Dense layer into the
    candidate feature space; each candidate is scored by a dot product with
    that projection and a softmax over the scores yields the action
    distribution.  Trained with REINFORCE plus an entropy bonus on
    transitions accumulated in the ``batch_*`` buffers.
    """
    def __init__(self, modelname, optfilename, lr, entropy_coefficient, config_dict):
        super(PaperListenerNetwork, self).__init__(modelname, optfilename, lr, entropy_coefficient, config_dict)
        # Per-step rollout buffers; cleared after each training update.
        self.batch_speaker_message = []
        self.batch_action = []
        self.batch_candidates = []
        self.batch_reward = []
    def initialize_model(self):
        """
        Build the listener model, or reload it from ``self.modelname`` when a
        saved copy exists.  Batch input and output.

        Model outputs: [softmax probabilities (bs, n_classes),
                        raw scores U (bs, n_classes),
                        message embedding z (bs, speaker_input_dim),
                        concatenated candidate slices us].
        """
        if not os.path.exists(self.modelname):
            ## Define model
            t_input = Input(shape=(self.config['max_message_length'],)) #Speakers Message, shape(bs, max_message_length)
            c_inputs_all = Input(shape=(self.config['n_classes'], self.config['speaker_input_dim'])) #Candidates, shape(bs, n_class, speaker_input_dim)
            inputs = [t_input, c_inputs_all]
            # Project the message into the candidate feature space.
            z = Dense(self.config['speaker_input_dim'], activation='sigmoid')(t_input) #shape(bs, speaker_input_dim)
            ts = []
            us = []
            for _ in range(self.config['n_classes']):
                #c_input = Input(shape=(self.config['speaker_input_dim'],)) #shape(bs, speaker_input_dim)
                # makeFunc(_) slices candidate _ out of the stacked input; the
                # factory binds the loop index per iteration.
                c_input = Lambda(makeFunc(_))(c_inputs_all) #shape(bs, speaker_input_dim)
                #t = Lambda(lambda x: K.expand_dims(K.sum(-K.square(x), axis=1)))(add([t_trans, Lambda(lambda x: -x)(c_input)])) #shape(bs, 1)
                t = Dot(1, False)([z, c_input]) #shape(bs, 1)
                ts.append(t)
                us.append(c_input)
            U = concatenate(ts) #shape(bs, n_classes)
            us = concatenate(us)
            final_output = Lambda(lambda x: K.softmax(x))(U) #shape(bs, n_classes)
            #final_output = Dense(self.n_classes, activation='softmax', kernel_initializer='identity')(U)
            #final_output = Dense(self.n_classes, activation='softmax')(U)
            #f1 = Dense(50)(U)
            #f2 = Lambda(lambda x: K.square(x))(f1)
            #final_output = Dense(self.n_classes, activation='softmax')(f2)
            self.listener_model = Model(inputs=inputs, outputs=[final_output, U, z, us])
            #self.listener_model.compile(loss="categorical_crossentropy", optimizer=RMSprop(lr=self.config['listener_lr']))
        else:
            self.load()
        #check!!!
    def build_train_fn(self):
        """
        Compile ``self.train_fn`` implementing REINFORCE with an entropy
        bonus.  Batch input and output.
        """
        #direct prob input!!!
        action_prob_placeholder = self.listener_model.output[0] #(bs, n_classes)
        action_onehot_placeholder = K.placeholder(shape=(None, self.config['n_classes']), name="action_onehot") #(bs, n_classes)
        reward_placeholder = K.placeholder(shape=(None,), name="reward") #(?)
        # Probability of the action actually taken (one-hot mask + sum).
        action_prob = K.sum(action_prob_placeholder * action_onehot_placeholder, axis=1)
        log_action_prob = K.log(action_prob)
        loss = - log_action_prob * reward_placeholder
        # NOTE: sum(p * log p) is the *negative* entropy; adding it with a
        # positive coefficient pushes the policy towards higher entropy.
        # 1e-10 guards against log(0).
        entropy = K.sum(action_prob_placeholder * K.log(action_prob_placeholder + 1e-10), axis=1)
        #entropy = K.sum(entropy)
        loss = loss + self.entropy_coefficient * entropy
        loss = K.mean(loss)
        self.opt = Adam(lr=self.lr)
        self.updates = self.opt.get_updates(params=self.listener_model.trainable_weights, loss=loss)
        if os.path.exists(self.optfilename):
            # Resume optimizer state from a previous run.
            self.load_opt()
        self.train_fn = K.function(
            inputs = self.listener_model.input + [action_onehot_placeholder, reward_placeholder],
            outputs=[loss, loss], updates=self.updates)
    def reshape_message_candidates(self, speaker_message, candidates):
        """Validate shapes and add a batch dimension to a single example."""
        assert len(speaker_message.shape)==1 and speaker_message.shape[0]==self.config['max_message_length']
        assert len(candidates.shape)==2 and candidates.shape[0]==self.config['n_classes'] and candidates.shape[1]==self.config['speaker_input_dim']
        speaker_message = np.expand_dims(speaker_message, axis=0) #shape(1, max_message_length)
        #X = [speaker_message] + [c.reshape([1,-1]) for c in candidates]
        X = [speaker_message, np.expand_dims(candidates, axis=0)]
        return X
    def sample_from_listener_policy(self, speaker_message, candidates):
        """
        Sample an action from the policy distribution.
        Input and output are all just one instance.  No bs dimensize.
        Returns (action int, probs (n_classes,), raw scores U (n_classes,)).
        """
        X = self.reshape_message_candidates(speaker_message, candidates)
        listener_output= self.listener_model.predict_on_batch(X)
        y, U, z = listener_output[:3]
        #us = listener_output[3]
        listener_probs = y
        listener_probs = np.squeeze(listener_probs) #shape(n_class)
        listener_action = np.random.choice(np.arange(self.config['n_classes']), p=listener_probs) #int
        U = np.squeeze(U)
        return listener_action, listener_probs, U
    def infer_from_listener_policy(self, speaker_message, candidates):
        """
        Greedy variant of ``sample_from_listener_policy``: argmax instead of
        sampling.  Input and output are all just one instance.  No bs
        dimensize.
        """
        X = self.reshape_message_candidates(speaker_message, candidates)
        listener_output= self.listener_model.predict_on_batch(X)
        y, U, z = listener_output[:3]
        #us = listener_output[3]
        listener_probs = y
        listener_probs = np.squeeze(listener_probs) #shape(n_class)
        listener_action = np.argmax(listener_probs) #int
        U = np.squeeze(U)
        return listener_action, listener_probs, U
    def train_listener_policy_on_batch(self):
        """
        Run one REINFORCE gradient step on the buffered transitions, then
        clear the buffers.  Train as a batch.  Loss is an float for a batch.
        """
        action_onehot = to_categorical(self.batch_action, num_classes=self.config['n_classes'])
        #self.batch_candidates = np.array(self.batch_candidates).transpose([1, 0, 2]).tolist() #shape(num_classes, bs, speaker_input_dim)
        #self.batch_candidates = np.swapaxes(np.array(self.batch_candidates), 0, 1).tolist() #shape(num_classes, bs, speaker_input_dim)
        #self.batch_candidates = np.swapaxes(np.array(self.batch_candidates), 0, 1).astype('float32').tolist() #shape(num_classes, bs, speaker_input_dim)
        #self.batch_candidates = [np.array(_) for _ in self.batch_candidates]
        #_loss, _entropy = self.train_fn([self.batch_speaker_message] + self.batch_candidates + [action_onehot, self.batch_reward] )
        _loss, _entropy = self.train_fn([np.array(self.batch_speaker_message), self.batch_candidates, action_onehot, self.batch_reward] )
        #print("Listener loss: ", _loss)
        self.batch_speaker_message = [] #shape(bs, max_message_length)
        self.batch_action = [] #shape(bs)
        self.batch_candidates = [] #shape(bs, n_classes, speaker_input_dim)
        self.batch_reward = [] #shape(bs)
    def remember_listener_training_details(self, speaker_message, action, action_probs, target, candidates, reward):
        """
        Buffer one transition for the next batch update.
        Inputs are just one instance.  No bs dimensize.
        ``action_probs`` and ``target`` are accepted for interface
        compatibility with other listener classes but are not stored here.
        """
        self.batch_speaker_message.append(speaker_message)
        self.batch_action.append(action)
        self.batch_candidates.append(candidates)
        self.batch_reward.append(reward)
class PaperListenerNetwork_rnn(PaperListenerNetwork):
    """Listener variant that reads a variable-length discrete message with an
    LSTM and scores each candidate by the negative squared Euclidean distance
    between the message embedding and a shared Dense embedding of the
    candidate.
    """
    def reshape_message_candidates(self, speaker_message, candidates):
        """One-hot encode the symbol sequence and add a batch dimension."""
        #if not self.config['fixed_length']:
        #    assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
        #else:
        #    assert len(speaker_message.shape)==1 and speaker_message.shape[0]==self.config['max_message_length']
        assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
        assert len(candidates.shape)==2 and candidates.shape[0]==self.config['n_classes'] and candidates.shape[1]==self.config['speaker_input_dim']
        speaker_message = np.expand_dims(to_categorical(speaker_message, self.config['alphabet_size']), axis=0) #shape(1, message_length, alphabet_size)
        #X = [speaker_message] + [c.reshape([1,-1]) for c in candidates]
        X = [speaker_message, np.expand_dims(candidates, axis=0)]
        return X
    def initialize_model(self):
        """
        Build the LSTM-based listener, or reload it from ``self.modelname``
        when a saved copy exists.  Batch input and output.
        """
        ## Define model
        if not os.path.exists(self.modelname):
            t_input = Input(shape=(None, self.config['alphabet_size'],)) #Speakers Message, shape(bs, message_length, alphabet_size)
            #c_inputs_all = Input(shape=(self.config['n_classes'], self.config['speaker_input_dim'])) #Candidates, shape(bs, n_classes, speaker_input_dim)
            c_inputs_all = Input(shape=(None, self.config['speaker_input_dim'])) #Candidates, shape(bs, n_classes, speaker_input_dim)
            inputs = [t_input, c_inputs_all]
            lstm = LSTM(self.config['listener_dim'], activation='tanh', return_sequences=False, return_state=True)
            # return_state=True: o is the final output, sh/sc the final
            # hidden/cell states (only o is used).
            o, sh, sc = lstm(t_input)
            z = Dense(self.config['listener_dim'], activation='sigmoid')(o) #shape(bs, listener_dim)
            ts = []
            us = []
            # One Dense layer instance, shared across all candidates.
            u = Dense(self.config['listener_dim'], activation='sigmoid')
            for _ in range(self.config['n_classes']):
                #c_input = Input(shape=(self.config['speaker_input_dim'],)) #shape(bs, speaker_input_dim)
                c_input = Lambda(makeFunc(_))(c_inputs_all)
                uc = u(c_input)
                # Score = -||z - uc||^2 (closer embeddings score higher).
                t = Lambda(lambda x: K.expand_dims(K.sum(-K.square(x), axis=1)))(add([z, Lambda(lambda x: -x)(uc)])) #shape(bs, 1)
                #t = Dot(1, False)([z,uc]) #shape(bs, 1)
                ts.append(t)
                us.append(uc)
            U = concatenate(ts) #shape(bs, n_classes)
            us = concatenate(us)
            final_output = Lambda(lambda x: K.softmax(x))(U)
            #shape(bs, n_classes)
            self.listener_model = Model(inputs=inputs, outputs=[final_output, U, z, us])
            #self.listener_model.compile(loss="categorical_crossentropy", optimizer=RMSprop(lr=self.config['listener_lr']))
        else:
            self.load()
        #check!!!
    def set_updates(self):
        """Create the optimizer and its update ops for ``self.loss``."""
        self.opt = Adam(lr=self.lr)
        #adam = RMSprop(lr=self.lr)
        self.updates = self.opt.get_updates(params=self.listener_model.trainable_weights, loss=self.loss)
        if os.path.exists(self.optfilename):
            self.load_opt()
    def build_train_fn(self):
        """
        Compile ``self.train_fn`` (REINFORCE + entropy bonus).  Batch input
        and output.  Like the parent, except the one-hot placeholder has a
        dynamic second dimension and optimizer setup is delegated to
        ``set_updates``.
        """
        #direct prob input!!!
        action_prob_placeholder = self.listener_model.output[0] #(bs, n_classes)
        #action_onehot_placeholder = K.placeholder(shape=(None, self.config['n_classes']), name="action_onehot") #(bs, n_classes)
        action_onehot_placeholder = K.placeholder(shape=(None, None), name="action_onehot") #(bs, n_classes)
        reward_placeholder = K.placeholder(shape=(None,), name="reward") #(?)
        action_prob = K.sum(action_prob_placeholder*action_onehot_placeholder, axis=1)
        log_action_prob = K.log(action_prob)
        loss = - log_action_prob*reward_placeholder
        # sum(p * log p) is the *negative* entropy; 1e-10 guards log(0).
        entropy = K.sum(action_prob_placeholder * K.log(action_prob_placeholder + 1e-10), axis=1)
        #entropy = K.sum(entropy)
        loss = loss + self.entropy_coefficient * entropy
        loss = K.mean(loss)
        self.loss = loss
        self.set_updates()
        self.train_fn = K.function(
            inputs = self.listener_model.input + [action_onehot_placeholder, reward_placeholder],
            outputs=[loss, loss], updates=self.updates)
    def remember_listener_training_details(self, speaker_message, action, action_probs, target, candidates, reward):
        """
        Buffer one transition.  Inputs are just one instance.  No bs
        dimensize.  The message is padded to ``max_message_length`` and
        one-hot encoded before storing so batch entries are uniform.
        """
        #if not self.config['fixed_length']:
        toadd = self.config['max_message_length'] - len(speaker_message)
        for _ in range(toadd):
            # NOTE(review): padding with -1 makes to_categorical one-hot the
            # *last* alphabet index (negative indices wrap) -- confirm that
            # is the intended padding symbol.
            speaker_message = np.append(speaker_message, -1)
        speaker_message = to_categorical(speaker_message, self.config['alphabet_size']) #shape(message_length, alphabet_size)
        self.batch_speaker_message.append(speaker_message)
        self.batch_action.append(action)
        self.batch_candidates.append(candidates)
        self.batch_reward.append(reward)
class PaperListenerNetwork_rnn_conv(PaperListenerNetwork_rnn):
    """RNN listener whose candidates are raw images, embedded through a
    shared (optionally pre-trained, optionally frozen) CNN before scoring.
    """
    def __init__(self, modelname, optfilename, lr, entropy_coefficient, pretrain_convmodel_file, traincnn, config):
        # Stored before super().__init__ because initialize_model (invoked
        # from the base constructor) reads them.
        self.pretrain_convmodel_file = pretrain_convmodel_file
        self.traincnn = traincnn  # False => CNN weights excluded from updates
        super(PaperListenerNetwork_rnn_conv, self).__init__(modelname, optfilename, lr, entropy_coefficient, config)
    def initialize_model(self):
        """
        Build (or reload) the CNN+LSTM listener.  Batch input and output.
        Afterwards split the trainable weights into CNN vs non-CNN groups so
        ``set_updates`` can freeze the CNN when ``traincnn`` is False.
        """
        if not os.path.exists(self.modelname):
            ## Define model
            self.conv_model = convnet_com(self.config['speaker_input_w'], self.config['speaker_input_h'], 3, preloadfile=self.pretrain_convmodel_file, name='conv_model_l')
            t_input = Input(shape=(None, self.config['alphabet_size'],)) #Speakers Message, shape(bs, message_length, alphabet_size)
            c_inputs_all = Input(shape=(self.config['n_classes'], self.config['speaker_input_w'], self.config['speaker_input_h'], 3), name='image_l') #Candidates, shape(bs, speaker_input_w, speaker_input_h, 3)
            inputs = [t_input, c_inputs_all]
            lstm = LSTM(self.config['listener_dim'], activation='tanh', return_sequences=False, return_state=True)
            o, sh, sc = lstm(t_input)
            z = Dense(self.config['listener_dim'], activation='sigmoid')(o) #shape(bs, listener_dim)
            #u = Dense(self.config['listener_dim'], activation='sigmoid',kernel_regularizer=regularizers.l2(0.01))
            # Shared Dense embedding applied to every candidate's CNN features.
            u = Dense(self.config['listener_dim'], activation='sigmoid')
            ts = []
            us = []
            for _ in range(self.config['n_classes']):
                #c_input = Input(shape=(self.config['speaker_input_w'],self.config['speaker_input_h'],3)) #speaker_model.input[0], shape(bs, speaker_input_w, speaker_input_h, 3)
                #c_input = Lambda(lambda x: x[:, _])(c_inputs_all)
                c_input = Lambda(makeFunc(_))(c_inputs_all)
                conv_outputs = self.conv_model(c_input)
                uc = u(conv_outputs)
                # Score = -||z - uc||^2 (closer embeddings score higher).
                t = Lambda(lambda x: K.expand_dims(K.sum(-K.square(x),axis=1)))(add([z, Lambda(lambda x: -x)(uc)])) #shape(bs, 1)
                #t = Dot(1, False)([z,uc]) #shape(bs, 1)
                ts.append(t)
                us.append(uc)
            U = concatenate(ts) #shape(bs, n_classes)
            us = concatenate(us)
            final_output = Lambda(lambda x: K.softmax(x))(U) #shape(bs, n_classes)
            self.listener_model = Model(inputs=inputs, outputs=[final_output, U, z, us])
            #self.listener_model.compile(loss="categorical_crossentropy", optimizer=RMSprop(lr=self.config['listener_lr']))
        else:
            self.load()
        #check!!!
        # Re-fetch the CNN sub-model by layer name so self.conv_model points
        # inside self.listener_model in both the fresh-build and reload cases.
        self.conv_model = [l for l in self.listener_model.layers if l.name=='conv_model_l'][0]
        #self.listener_model.layers[6].kernel_regularizer = None
        #self.internal_model = Model(inputs=self.listener_model.inputs, outputs=[self.listener_model.layers[7].get_output_at(_) for _ in range(2)] + [self.listener_model.layers[6].output, self.listener_model.layers[-2].output]) #dot
        #self.internal_model = Model(inputs=self.listener_model.inputs, outputs=[self.listener_model.layers[6].get_output_at(_) for _ in range(2)] + [self.listener_model.layers[7].output, self.listener_model.layers[-2].output]) #euc
        self.trainable_weights_others = []
        self.trainable_weights_conv = []
        for layer in self.listener_model.layers:
            if layer.name!='conv_model_l':
                self.trainable_weights_others.extend(layer.trainable_weights)
            else:
                self.trainable_weights_conv.extend(layer.trainable_weights)
    def set_updates(self):
        """Build the optimizer update ops, optionally freezing the CNN."""
        self.opt = Adam(lr=self.lr)
        #self.opt = RMSprop(lr=self.lr)
        #opt = SGD(lr=self.lr, momentum=0.9, decay=1e-6, nesterov=True)
        if not self.traincnn:
            #self.updates = self.opt.get_updates(params=self.trainable_weights_others+self.trainable_weights_rnn, loss=self.loss)
            self.updates = self.opt.get_updates(params=self.trainable_weights_others, loss=self.loss)
        else:
            self.updates = self.opt.get_updates(params=self.listener_model.trainable_weights, loss=self.loss)
        if os.path.exists(self.optfilename):
            self.load_opt()
    def reshape_message_candidates(self, speaker_message, candidates):
        """One-hot encode the message; candidates are image tensors here."""
        #if not self.config['fixed_length']:
        #    assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
        #else:
        #    assert len(speaker_message.shape)==1 and speaker_message.shape[0]==self.config['max_message_length']
        assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
        assert len(candidates.shape)==4 and candidates.shape[0]==self.config['n_classes'] and candidates.shape[1]==self.config['speaker_input_w'] and candidates.shape[2]==self.config['speaker_input_h']
        speaker_message = np.expand_dims(to_categorical(speaker_message, self.config['alphabet_size']), axis=0) #shape(1, ?, alphabet_size)
        X = [speaker_message, np.expand_dims(candidates, axis=0)]
        return X
'''
class PaperListenerNetwork_rnn_conv_color(PaperListenerNetwork_rnn):
def initialize_model(self):
"""
Batch input and output.
"""
if not os.path.exists(self.modelname):
## Define model
t_input = Input(shape=(None, self.config['alphabet_size'],)) #Speakers Message, shape(bs, message_length, alphabet_size)
c_inputs_all = Input(shape=(self.config['n_classes'], 8))
inputs = [t_input, c_inputs_all]
lstm = LSTM(self.config['listener_dim'], activation='tanh', return_sequences=False, return_state=True)
o, sh, sc = lstm(t_input)
z = Dense(self.config['listener_dim'], activation='sigmoid')(o) #shape(bs, listener_dim)
u = Dense(self.config['listener_dim'], activation='sigmoid')
ts = []
for _ in range(self.config['n_classes']):
#c_input = Input(shape=(self.config['speaker_input_w'],self.config['speaker_input_h'],3)) #speaker_model.input[0], shape(bs, speaker_input_w, speaker_input_h, 3)
#c_input = Lambda(lambda x: x[:, _])(c_inputs_all)
c_input = Lambda(makeFunc(_))(c_inputs_all)
#conv_outputs = conv_model(c_input)
#conv_outputs = c_input
uc = u(c_input)
t = Lambda(lambda x: K.expand_dims(K.sum(-K.square(x),axis=1)))(add([z, Lambda(lambda x: -x)(uc)])) #shape(bs, 1)
ts.append(t)
U = concatenate(ts) #shape(bs, n_classes)
final_output = Lambda(lambda x: K.softmax(x))(U) #shape(bs, n_classes)
self.listener_model = Model(inputs=inputs, outputs=[final_output, z, U])
#self.listener_model.compile(loss="categorical_crossentropy", optimizer=RMSprop(lr=self.config['listener_lr']))
else:
self.load()
#check!!!
self.trainable_weights_rnn = self.listener_model.trainable_weights[:3]
self.trainable_weights_others = self.listener_model.trainable_weights[3:]
def set_updates(self):
self.opt = Adam(lr=self.lr)
#opt = RMSprop(lr=self.lr)
#opt = SGD(lr=self.lr, momentum=0.9, decay=1e-6, nesterov=True)
self.updates = self.opt.get_updates(params=self.listener_model.trainable_weights, loss=self.loss)
if os.path.exists(self.optfilename):
self.load_opt()
def reshape_message_candidates(self, speaker_message, candidates):
#if not self.config['fixed_length']:
# assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
#else:
# assert len(speaker_message.shape)==1 and speaker_message.shape[0]==self.config['max_message_length']
#pdb.set_trace()
assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
assert len(candidates.shape)==2 and candidates.shape[0]==self.config['n_classes'] and candidates.shape[1]==8
speaker_message = np.expand_dims(to_categorical(speaker_message, self.config['alphabet_size']), axis=0) #shape(1, ?, alphabet_size)
X = [speaker_message, np.expand_dims(candidates, axis=0)]
return X
class PaperListenerNetwork_direct(BaseListenerNetwork):
def __init__(self, modelname, config_dict):
assert False #TOMODIFY
super(PaperListenerNetwork_direct, self).__init__(modelname, config_dict)
self.batch_speaker_message = []
self.batch_action = []
self.batch_candidates = []
self.batch_reward = []
def initialize_model(self):
"""
Batch input and output.
"""
if not os.path.exists(self.modelname):
## Define model
## Speakers Message
t_input = Input(shape=(self.config['max_message_length'],)) #shape(bs, max_message_length)
t_trans = Dense(self.config['speaker_input_dim'],
#kernel_initializer=keras.initializers.Identity(gain=1.0),
#bias_initializer='zeros',
activation='sigmoid')(t_input) #shape(bs, speaker_input_dim)
inputs = [t_input]
ts = []
for _ in range(self.config['n_classes']):
c_input = Input(shape=(self.config['speaker_input_dim'],)) #shape(bs, speaker_input_dim)
t = Lambda(lambda x: K.expand_dims(K.sum(-K.square(x),axis=1)))(add([t_trans, Lambda(lambda x: -x)(c_input)])) #shape(bs, 1)
inputs.append(c_input)
ts.append(t)
U = concatenate(ts) #shape(bs, n_classes)
listener_probs = U
#listener_probs = Lambda(lambda x: K.softmax(x))(U) #shape(bs, n_classes)
listener_infer_action = Lambda(lambda x: K.argmax(x))(U) #shape(bs)
target_onehot_placeholder = Input(shape=(self.config['n_classes'],), name="action_onehot") #(bs, n_classes)
listener_prob_2 = dot([listener_probs, target_onehot_placeholder], axes=1)
listener_prob_2 = Lambda(lambda x:K.squeeze(x, axis=1))(listener_prob_2)
self.listener_model = Model(inputs=inputs + [target_onehot_placeholder], outputs=[listener_probs, listener_infer_action, t_trans, listener_prob_2])
else:
self.load()
#check!!!
def build_train_fn(self):
"""
Batch input and output.
"""
#direct prob input!!!
#reward_placeholder = K.placeholder(shape=(None,), name="reward") #(?)
action_prob = self.listener_model.output[3]
#loss = K.log(-action_prob)*reward_placeholder
#loss = - action_prob * reward_placeholder
loss = - action_prob
loss = K.mean(loss)
self.opt = Adam(lr=self.config['listener_lr'])
self.updates = self.opt.get_updates(params=self.listener_model.trainable_weights,loss=loss)
#if os.path.exists(self.optfilename):
# self.load_opt()
self.train_fn = K.function(
#inputs = self.listener_model.input + [reward_placeholder],
inputs = self.listener_model.input,
outputs=[loss, loss], updates=self.updates)
def sample_from_listener_policy(self, speaker_message, candidates):
"""
Input and output are all just one instance. No bs dimensize.
"""
X = self.reshape_message_candidates(speaker_message, candidates) + [np.zeros([1, self.config['n_classes']])]
listener_probs, listener_infer_action, _t_trans, _lp2 = self.listener_model.predict_on_batch(X)
listener_probs = np.squeeze(listener_probs) #shape(n_class)
#listener_probs = scipy.special.softmax(listener_probs)
listener_probs = softmax(listener_probs)
#pdb.set_trace() #???norm???
listener_action = np.random.choice(np.arange(self.config['n_classes']), p=listener_probs) #int
return listener_action, listener_probs
def infer_from_listener_policy(self, speaker_message, candidates):
"""
Input and output are all just one instance. No bs dimensize.
"""
X = self.reshape_message_candidates(speaker_message, candidates) + [np.zeros([1, self.config['n_classes']])]
listener_probs, listener_infer_action, _t_trans, _lp2 = self.listener_model.predict_on_batch(X)
listener_probs = np.squeeze(listener_probs) #shape(n_class)
listener_probs = softmax(listener_probs)
listener_action = np.squeeze(listener_infer_action).tolist() #int
return listener_action, listener_probs
def train_listener_policy_on_batch(self):
"""
Train as a batch. Loss is an float for a batch
"""
self.batch_candidates = np.array(self.batch_candidates).transpose([1, 0, 2]).tolist() #shape(num_classes, bs, speaker_input_dim
#_loss, _entropy = self.train_fn([self.batch_speaker_message] + self.batch_candidates + [self.batch_action, self.batch_reward] )
_loss, _entropy = self.train_fn([self.batch_speaker_message] + self.batch_candidates + [self.batch_action] )
#print("Listener loss: ", _loss)
self.batch_speaker_message = [] #shape(bs, max_message_length)
self.batch_action = [] #shape(bs, n_classes)
self.batch_candidates = [] #shape(bs, n_classes, speaker_input_dim)
self.batch_reward = [] #shape(bs)
def remember_listener_training_details(self, speaker_message, action, action_probs, target, candidates, reward):
"""
Inputs are just one instance. No bs dimensize.
"""
#action_onehot = np.zeros(self.config['n_classes'])
#action_onehot[action] = 1
action_onehot = np.ones(self.config['n_classes']) * np.all(target==candidates, axis=1)
self.batch_action.append(action_onehot)
self.batch_speaker_message.append(speaker_message)
self.batch_candidates.append(candidates)
self.batch_reward.append(reward)
''' | 44.808621 | 227 | 0.719881 | 3,670 | 25,989 | 4.848501 | 0.068665 | 0.046645 | 0.042037 | 0.023266 | 0.8291 | 0.793638 | 0.777116 | 0.755536 | 0.724739 | 0.704901 | 0 | 0.006739 | 0.143522 | 25,989 | 580 | 228 | 44.808621 | 0.792668 | 0.194236 | 0 | 0.60241 | 0 | 0 | 0.053539 | 0 | 0 | 0 | 0 | 0 | 0.028112 | 1 | 0.116466 | false | 0 | 0.068273 | 0.004016 | 0.228916 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5b480e609340e7c3163c99f85166ef4016ae608a | 48 | py | Python | src/apps/trainings/services/__init__.py | sanderland/katago-server | 6414fab080d007c05068a06ff4f25907b92848bd | [
"MIT"
] | 27 | 2020-05-03T11:01:27.000Z | 2022-03-17T05:33:10.000Z | src/apps/trainings/services/__init__.py | sanderland/katago-server | 6414fab080d007c05068a06ff4f25907b92848bd | [
"MIT"
] | 54 | 2020-05-09T01:18:41.000Z | 2022-01-22T10:31:15.000Z | src/apps/trainings/services/__init__.py | sanderland/katago-server | 6414fab080d007c05068a06ff4f25907b92848bd | [
"MIT"
] | 9 | 2020-09-29T11:31:32.000Z | 2022-03-09T01:37:50.000Z | from .bayesian_elo import BayesianRatingService
| 24 | 47 | 0.895833 | 5 | 48 | 8.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 48 | 1 | 48 | 48 | 0.954545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
5b8b6d1f4ab0f3039cf2da903751e51e15cb144e | 1,768 | py | Python | Variado_GeekUniversity/guppe/args.py | PauloFTeixeira/curso_python | 9040c7dcc5262620f6330bb9637710bb8899bc6b | [
"MIT"
] | null | null | null | Variado_GeekUniversity/guppe/args.py | PauloFTeixeira/curso_python | 9040c7dcc5262620f6330bb9637710bb8899bc6b | [
"MIT"
] | null | null | null | Variado_GeekUniversity/guppe/args.py | PauloFTeixeira/curso_python | 9040c7dcc5262620f6330bb9637710bb8899bc6b | [
"MIT"
] | null | null | null | """
Entendendo o *args
- O *args é um parâmetro, como outro qualquer. Isso significa que você poderá
charmar de qualquer coisa, desde que começe com asterisco.
Exemplo:
*xis
Mas por convenção, utilizamos *args para definí-lo
Mas o que é o *args?
O parâmetro *args utilizado em uma função, coloca os valores extras informados como
entrada em uma tupla. Então desde já lembre-se que tuplas são imutáveis.
# Exemplos
def soma_todos_numeros(num1=1, num2=2, num3=3, num4=4):
return num1 + num2 + num3 + num4
print(soma_todos_numeros(4, 6, 9))
print(soma_todos_numeros(4, 6))
print(soma_todos_numeros(4, 6, 9, 5))
# Entendendo o args
def soma_todos_numeros(nome, email, *args):
return sum(args)
print(soma_todos_numeros('Angelina', 'Jolie'))
print(soma_todos_numeros('Angelina', 'Jolie', 1))
print(soma_todos_numeros('Angelina', 'Jolie', 2, 3))
print(soma_todos_numeros('Angelina', 'Jolie', 2, 3, 4))
print(soma_todos_numeros('Angelina', 'Jolie', 3, 4, 5, 6))
print(soma_todos_numeros('Angelina', 'Jolie', 23.4, 12.5))
# Outro exemplo de utilização do *args
def verifica_info(*args):
if 'Geek' in args and 'University' in args:
return 'Bem-vindo Geek!'
return 'Eu não tenho certeza quem você é ...'
print(verifica_info())
print(verifica_info(1, True, 'University', 'Geek'))
print(verifica_info(1, 'University', 3.145))
"""
def soma_todos_numeros(*args):
    """Sum every positional argument and return the total.

    ``*args`` packs all extra positional arguments into a tuple, so any
    number of numeric inputs is accepted; with no arguments the result is 0.
    """
    total = 0
    for valor in args:
        total += valor
    return total
# print(soma_todos_numeros())
# print(soma_todos_numeros(3, 4, 5, 6))
numeros = [1, 2, 3, 4, 5, 6, 7]
# Unpacking: the leading * spreads the list into separate positional
# arguments, i.e. this is equivalent to soma_todos_numeros(1, 2, ..., 7).
print(soma_todos_numeros(*numeros))
# OBS: O asterisco serve para que informemos ao Python que estamos
#passando como argumento uma coleção de dados. Desta forma, ele saberá
# que precisará antes desempacotar estes dados. | 22.379747 | 83 | 0.718891 | 278 | 1,768 | 4.44964 | 0.399281 | 0.109135 | 0.194018 | 0.203719 | 0.270816 | 0.270008 | 0.158448 | 0.119644 | 0 | 0 | 0 | 0.036242 | 0.15724 | 1,768 | 79 | 84 | 22.379747 | 0.79396 | 0.915724 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.151899 | 0 | 1 | 0.25 | false | 0 | 0 | 0.25 | 0.5 | 0.25 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 6 |
5bebb317f5eba89ee7abe9c379e69ee3e8996039 | 832 | py | Python | octicons16px/typography.py | andrewp-as-is/octicons16px.py | 1272dc9f290619d83bd881e87dbd723b0c48844c | [
"Unlicense"
] | 1 | 2021-01-28T06:47:39.000Z | 2021-01-28T06:47:39.000Z | octicons16px/typography.py | andrewp-as-is/octicons16px.py | 1272dc9f290619d83bd881e87dbd723b0c48844c | [
"Unlicense"
] | null | null | null | octicons16px/typography.py | andrewp-as-is/octicons16px.py | 1272dc9f290619d83bd881e87dbd723b0c48844c | [
"Unlicense"
] | null | null | null |
OCTICON_TYPOGRAPHY = """
<svg class="octicon octicon-typography" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"><path fill-rule="evenodd" d="M6.21 8.5L4.574 3.594 2.857 8.5H6.21zm.5 1.5l.829 2.487a.75.75 0 001.423-.474L5.735 2.332a1.216 1.216 0 00-2.302-.018l-3.39 9.688a.75.75 0 001.415.496L2.332 10H6.71zm3.13-4.358C10.53 4.374 11.87 4 13 4c1.5 0 3 .939 3 2.601v5.649a.75.75 0 01-1.448.275C13.995 12.82 13.3 13 12.5 13c-.77 0-1.514-.231-2.078-.709-.577-.488-.922-1.199-.922-2.041 0-.694.265-1.411.887-1.944C11 7.78 11.88 7.5 13 7.5h1.5v-.899c0-.54-.5-1.101-1.5-1.101-.869 0-1.528.282-1.84.858a.75.75 0 11-1.32-.716zM14.5 9H13c-.881 0-1.375.22-1.637.444-.253.217-.363.5-.363.806 0 .408.155.697.39.896.249.21.63.354 1.11.354.732 0 1.26-.209 1.588-.449.35-.257.412-.495.412-.551V9z"></path></svg>
"""
| 166.4 | 801 | 0.671875 | 214 | 832 | 2.607477 | 0.602804 | 0.028674 | 0.035842 | 0.028674 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.540576 | 0.081731 | 832 | 4 | 802 | 208 | 0.189791 | 0 | 0 | 0 | 0 | 0.333333 | 0.966306 | 0.374248 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5bf0b016edad2d418df38513a8b8d696e97578f5 | 39 | py | Python | nesmdb/__init__.py | duhaime/nesmdb | d56b176cebbcf91b0069fc529f0884768acf42e8 | [
"MIT"
] | 408 | 2018-06-07T22:53:16.000Z | 2022-03-23T09:48:57.000Z | nesmdb/__init__.py | duhaime/nesmdb | d56b176cebbcf91b0069fc529f0884768acf42e8 | [
"MIT"
] | 7 | 2018-07-05T23:51:40.000Z | 2022-03-04T07:54:04.000Z | nesmdb/__init__.py | duhaime/nesmdb | d56b176cebbcf91b0069fc529f0884768acf42e8 | [
"MIT"
] | 36 | 2018-06-07T22:59:16.000Z | 2022-03-01T01:37:05.000Z | import apu
import convert
import cycle
| 9.75 | 14 | 0.846154 | 6 | 39 | 5.5 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 39 | 3 | 15 | 13 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f33ba4af0bdaf50c9e8fc29b830b7c17188cc394 | 99 | py | Python | TSSR/__init__.py | cestcedric/TSSR-GAN | d6e1b50409e0f0591660552993e6d5b70d41e766 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | TSSR/__init__.py | cestcedric/TSSR-GAN | d6e1b50409e0f0591660552993e6d5b70d41e766 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | TSSR/__init__.py | cestcedric/TSSR-GAN | d6e1b50409e0f0591660552993e6d5b70d41e766 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | from .Blocks import *
from .Discriminator import *
from .Generator import *
from .Upscaler import * | 24.75 | 28 | 0.767677 | 12 | 99 | 6.333333 | 0.5 | 0.394737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.151515 | 99 | 4 | 29 | 24.75 | 0.904762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f36756ca953d66dd6052a2a0fef734ba6b8d7e9c | 39 | py | Python | zeus/vcs/__init__.py | conrad-kronos/zeus | ddb6bc313e51fb22222b30822b82d76f37dbbd35 | [
"Apache-2.0"
] | 221 | 2017-07-03T17:29:21.000Z | 2021-12-07T19:56:59.000Z | zeus/vcs/__init__.py | conrad-kronos/zeus | ddb6bc313e51fb22222b30822b82d76f37dbbd35 | [
"Apache-2.0"
] | 298 | 2017-07-04T18:08:14.000Z | 2022-03-03T22:24:51.000Z | zeus/vcs/__init__.py | conrad-kronos/zeus | ddb6bc313e51fb22222b30822b82d76f37dbbd35 | [
"Apache-2.0"
] | 24 | 2017-07-15T13:46:45.000Z | 2020-08-16T16:14:45.000Z | from .client import vcs_client # NOQA
| 19.5 | 38 | 0.769231 | 6 | 39 | 4.833333 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.179487 | 39 | 1 | 39 | 39 | 0.90625 | 0.102564 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f368f490bf1d0b6df7007da64f3b8d18f870e51f | 33 | py | Python | sbober_fractals/__init__.py | BoberSA/skb_package_tutorial | 3b7671b981b7f9b39abe7a07335d2351d8749d76 | [
"MIT"
] | null | null | null | sbober_fractals/__init__.py | BoberSA/skb_package_tutorial | 3b7671b981b7f9b39abe7a07335d2351d8749d76 | [
"MIT"
] | null | null | null | sbober_fractals/__init__.py | BoberSA/skb_package_tutorial | 3b7671b981b7f9b39abe7a07335d2351d8749d76 | [
"MIT"
] | null | null | null | from .fractals import Mandelbrot
| 16.5 | 32 | 0.848485 | 4 | 33 | 7 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121212 | 33 | 1 | 33 | 33 | 0.965517 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f399ec404e96615d00f2da0678d8677b21dbccfa | 647 | py | Python | monitoring-center-backend/tests/unit/test_model_probe.py | Heimdall-monitoring/monitoring-center | acf56a796c25fb6804fd16b1ff8c93645bc77ff3 | [
"MIT"
] | null | null | null | monitoring-center-backend/tests/unit/test_model_probe.py | Heimdall-monitoring/monitoring-center | acf56a796c25fb6804fd16b1ff8c93645bc77ff3 | [
"MIT"
] | 10 | 2020-09-09T14:37:05.000Z | 2020-11-26T13:14:09.000Z | monitoring-center-backend/tests/unit/test_model_probe.py | Heimdall-monitoring/monitoring-center | acf56a796c25fb6804fd16b1ff8c93645bc77ff3 | [
"MIT"
] | null | null | null | """
Test the probe model
"""
from monitoring_center import Probe
def test_equality_1():
assert Probe('1234', 'name1') == Probe('1234', 'name1')
assert Probe('1234', 'name2', 'description') == Probe('1234', 'name2', 'description')
def test_equality_2():
assert Probe('1234', 'name2') != Probe('1234', 'name3')
assert Probe('1234', 'name2') != Probe('12344', 'name2')
assert Probe('1234', 'name2', 'description') != Probe('1234', 'name2', 'description2')
assert Probe('1234', 'name2', 'description') != Probe('1234', 'name2')
def test_equality_3():
assert Probe('1234', 'name') != {'uuid': '1234', 'name': 'name'}
| 29.409091 | 90 | 0.616692 | 76 | 647 | 5.157895 | 0.302632 | 0.27551 | 0.285714 | 0.255102 | 0.471939 | 0.344388 | 0.344388 | 0.344388 | 0 | 0 | 0 | 0.133455 | 0.15456 | 647 | 21 | 91 | 30.809524 | 0.583181 | 0.030912 | 0 | 0 | 0 | 0 | 0.305331 | 0 | 0 | 0 | 0 | 0 | 0.636364 | 1 | 0.272727 | true | 0 | 0.090909 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f3b76d7efea9b5c07ddb9d24c98da602483cbfb2 | 226 | py | Python | scribdl/__init__.py | vaibhavkaushal11/scribd-downloader | 008e536f53df3478ae0ad48f4a0cba8ea6fff147 | [
"MIT"
] | 6 | 2019-05-23T08:50:26.000Z | 2021-04-04T03:54:31.000Z | scribdl/__init__.py | vaibhavkaushal11/scribd-downloader | 008e536f53df3478ae0ad48f4a0cba8ea6fff147 | [
"MIT"
] | null | null | null | scribdl/__init__.py | vaibhavkaushal11/scribd-downloader | 008e536f53df3478ae0ad48f4a0cba8ea6fff147 | [
"MIT"
] | 3 | 2019-06-13T05:50:34.000Z | 2019-08-16T16:58:23.000Z | from .version import __version__
from .downloader import Downloader
from .document import ScribdTextualDocument
from .document import ScribdImageDocument
from .book import ScribdBook
from .pdf_converter import ConvertToPDF
| 22.6 | 43 | 0.853982 | 25 | 226 | 7.52 | 0.48 | 0.12766 | 0.191489 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.119469 | 226 | 9 | 44 | 25.111111 | 0.944724 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
45fffccf3dc264d9c9690c6abab9fa0e6d68859f | 97 | py | Python | src/interpreter/functions/slice.py | incrementals/b-star | 325bb51eafd5c5173582bf065b82d10ef9669275 | [
"MIT"
] | 2 | 2021-11-02T04:28:32.000Z | 2021-11-05T14:27:08.000Z | src/interpreter/functions/slice.py | incrementals/b-star | 325bb51eafd5c5173582bf065b82d10ef9669275 | [
"MIT"
] | 6 | 2022-01-07T22:49:19.000Z | 2022-03-11T05:39:04.000Z | src/interpreter/functions/slice.py | incrementals/b-star | 325bb51eafd5c5173582bf065b82d10ef9669275 | [
"MIT"
] | 4 | 2021-11-26T01:38:32.000Z | 2022-02-27T20:54:08.000Z | def slice_func(array, index_start, index_end):
return array[int(index_start):int(index_end)]
| 32.333333 | 49 | 0.773196 | 16 | 97 | 4.375 | 0.5625 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.103093 | 97 | 2 | 50 | 48.5 | 0.804598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
341cec60b2aba43c556042c2752ac438228a90e8 | 120 | py | Python | OOP/Exercises/Inheritance_Lab/4_multilevel_inheritance/project/vehicle.py | tankishev/Python | 60e511fc901f136b88c681f77f209fe2f8c46447 | [
"MIT"
] | 2 | 2022-03-04T11:39:03.000Z | 2022-03-13T07:13:23.000Z | OOP/Exercises/Inheritance_Lab/4_multilevel_inheritance/project/vehicle.py | tankishev/Python | 60e511fc901f136b88c681f77f209fe2f8c46447 | [
"MIT"
] | null | null | null | OOP/Exercises/Inheritance_Lab/4_multilevel_inheritance/project/vehicle.py | tankishev/Python | 60e511fc901f136b88c681f77f209fe2f8c46447 | [
"MIT"
] | null | null | null | class Vehicle:
def __init__(self) -> None:
pass
def move(self) -> str:
return 'moving...'
| 15 | 31 | 0.508333 | 13 | 120 | 4.384615 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.358333 | 120 | 7 | 32 | 17.142857 | 0.74026 | 0 | 0 | 0 | 0 | 0 | 0.075 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.4 | false | 0.2 | 0 | 0.2 | 0.8 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 6 |
343d79a78200c12521be7538385d0595fd8f5ad0 | 235 | py | Python | tonks/__init__.py | vanderveld/tonks | e87afbd9614b276b443b4a7527fd1fda01a8be4c | [
"BSD-3-Clause"
] | null | null | null | tonks/__init__.py | vanderveld/tonks | e87afbd9614b276b443b4a7527fd1fda01a8be4c | [
"BSD-3-Clause"
] | null | null | null | tonks/__init__.py | vanderveld/tonks | e87afbd9614b276b443b4a7527fd1fda01a8be4c | [
"BSD-3-Clause"
] | null | null | null | from ._version import __version__
from tonks.dataloader import MultiDatasetLoader
from tonks.ensemble import *
from tonks.learner import MultiTaskLearner, MultiInputMultiTaskLearner
from tonks.text import *
from tonks.vision import *
| 29.375 | 70 | 0.846809 | 27 | 235 | 7.185185 | 0.444444 | 0.231959 | 0.154639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.110638 | 235 | 7 | 71 | 33.571429 | 0.92823 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
3470c0e867b8fc3e58dfbe731c9e2f89beaae877 | 39 | py | Python | vae_lm/scripts/__init__.py | Nemexur/nonauto-lm | 6f237e4fc2b3b679cd92126ea5facd58d3cf6e75 | [
"Apache-2.0"
] | 3 | 2021-05-04T09:41:20.000Z | 2021-12-14T07:41:40.000Z | vae_lm/scripts/__init__.py | Nemexur/nonauto-lm | 6f237e4fc2b3b679cd92126ea5facd58d3cf6e75 | [
"Apache-2.0"
] | null | null | null | vae_lm/scripts/__init__.py | Nemexur/nonauto-lm | 6f237e4fc2b3b679cd92126ea5facd58d3cf6e75 | [
"Apache-2.0"
] | null | null | null | from .train_worker import train_worker
| 19.5 | 38 | 0.871795 | 6 | 39 | 5.333333 | 0.666667 | 0.6875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102564 | 39 | 1 | 39 | 39 | 0.914286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
caa036a17f79f370dbdf5e6398073c70793161ad | 238 | py | Python | books_management/publisher/resource.py | blackriddle/books-management | ba485a362a8bc50052dd6f4fc3884e639ca762b0 | [
"MIT"
] | null | null | null | books_management/publisher/resource.py | blackriddle/books-management | ba485a362a8bc50052dd6f4fc3884e639ca762b0 | [
"MIT"
] | null | null | null | books_management/publisher/resource.py | blackriddle/books-management | ba485a362a8bc50052dd6f4fc3884e639ca762b0 | [
"MIT"
] | null | null | null | from flask_restful import Resource
from model import Publisher
class PublisherResource(Resource):
def get(self):
pass
def post(self):
pass
def patch(self):
pass
def delete(self):
pass
| 13.222222 | 34 | 0.617647 | 28 | 238 | 5.214286 | 0.571429 | 0.219178 | 0.226027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.319328 | 238 | 17 | 35 | 14 | 0.901235 | 0 | 0 | 0.363636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.363636 | false | 0.363636 | 0.181818 | 0 | 0.636364 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 6 |
cabd0ae8af406e13aef94ab7ff8b51129c59efff | 6,611 | py | Python | test/cut/test_cut_ops_preserve_id.py | stachu86/lhotse | d5e78154db2d4d52f15aaadc8882f76eb5b77640 | [
"Apache-2.0"
] | 353 | 2020-10-31T10:38:51.000Z | 2022-03-30T05:22:52.000Z | test/cut/test_cut_ops_preserve_id.py | stachu86/lhotse | d5e78154db2d4d52f15aaadc8882f76eb5b77640 | [
"Apache-2.0"
] | 353 | 2020-10-27T23:25:12.000Z | 2022-03-31T22:16:05.000Z | test/cut/test_cut_ops_preserve_id.py | stachu86/lhotse | d5e78154db2d4d52f15aaadc8882f76eb5b77640 | [
"Apache-2.0"
] | 66 | 2020-11-01T06:08:08.000Z | 2022-03-29T02:03:07.000Z | import pytest
# Note:
# Definitions for `cut1`, `cut2` and `cut_set` parameters are standard Pytest fixtures located in test/cut/conftest.py
# ########################################
# ############### PADDING ################
# ########################################
@pytest.mark.parametrize("direction", ["right", "left", "both"])
def test_pad_cut_preserve_id_false(cut1, direction: str):
padded = cut1.pad(duration=300, direction=direction)
assert padded.id != cut1.id
@pytest.mark.parametrize("direction", ["right", "left", "both"])
def test_pad_cut_preserve_id_true(cut1, direction: str):
padded = cut1.pad(duration=300, direction=direction, preserve_id=True)
assert padded.id == cut1.id
@pytest.mark.parametrize("direction", ["right", "left", "both"])
def test_pad_mixed_cut_preserve_id_false(cut1, direction: str):
mixed = cut1.append(cut1)
padded = mixed.pad(duration=300, direction=direction)
assert padded.id != mixed.id
@pytest.mark.parametrize("direction", ["right", "left", "both"])
def test_pad_mixed_cut_preserve_id_true(cut1, direction: str):
mixed = cut1.append(cut1)
padded = mixed.pad(duration=300, direction=direction, preserve_id=True)
assert padded.id == mixed.id
# ########################################
# ############## APPENDING ###############
# ########################################
def test_append_cut_preserve_id_none(cut1, cut2):
appended = cut1.append(cut2)
assert appended.id != cut1.id
assert appended.id != cut2.id
def test_append_cut_preserve_id_left(cut1, cut2):
appended = cut1.append(cut2, preserve_id="left")
assert appended.id == cut1.id
assert appended.id != cut2.id
def test_append_cut_preserve_id_right(cut1, cut2):
appended = cut1.append(cut2, preserve_id="right")
assert appended.id != cut1.id
assert appended.id == cut2.id
def test_append_mixed_cut_preserve_id_none(cut1, cut2):
premixed = cut1.append(cut1)
appended = premixed.append(cut2)
assert appended.id != premixed.id
assert appended.id != cut2.id
def test_append_mixed_cut_preserve_id_left(cut1, cut2):
premixed = cut1.append(cut1)
appended = premixed.append(cut2, preserve_id="left")
assert appended.id == premixed.id
assert appended.id != cut2.id
def test_append_mixed_cut_preserve_id_right(cut1, cut2):
premixed = cut1.append(cut1)
appended = premixed.append(cut2, preserve_id="right")
assert appended.id != premixed.id
assert appended.id == cut2.id
# ########################################
# ############### MIXING #################
# ########################################
def test_mix_cut_preserve_id_none(cut1, cut2):
mixed = cut1.mix(cut2)
assert mixed.id != cut1.id
assert mixed.id != cut2.id
def test_mix_cut_preserve_id_left(cut1, cut2):
mixed = cut1.mix(cut2, preserve_id="left")
assert mixed.id == cut1.id
assert mixed.id != cut2.id
def test_mix_cut_preserve_id_right(cut1, cut2):
mixed = cut1.mix(cut2, preserve_id="right")
assert mixed.id != cut1.id
assert mixed.id == cut2.id
def test_mix_mixed_cut_preserve_id_none(cut1, cut2):
premixed = cut1.append(cut1)
mixed = premixed.mix(cut2)
assert mixed.id != premixed.id
assert mixed.id != cut2.id
def test_mix_mixed_cut_preserve_id_left(cut1, cut2):
premixed = cut1.append(cut1)
mixed = premixed.mix(cut2, preserve_id="left")
assert mixed.id == premixed.id
assert mixed.id != cut2.id
def test_mix_mixed_cut_preserve_id_right(cut1, cut2):
premixed = cut1.append(cut1)
mixed = premixed.mix(cut2, preserve_id="right")
assert mixed.id != premixed.id
assert mixed.id == cut2.id
# ########################################
# ############ PERTURB SPEED #############
# ########################################
def test_cut_perturb_speed_affix_id_true(cut1):
cut_sp = cut1.perturb_speed(1.1)
assert cut_sp.id != cut1.id
def test_cut_perturb_speed_affix_id_false(cut1):
cut_sp = cut1.perturb_speed(1.1, affix_id=False)
assert cut_sp.id == cut1.id
def test_mixed_cut_perturb_speed_affix_id_true(cut1):
premixed = cut1.append(cut1)
cut_sp = premixed.perturb_speed(1.1)
assert cut_sp.id != premixed.id
def test_mixed_cut_perturb_speed_affix_id_false(cut1):
premixed = cut1.append(cut1)
cut_sp = premixed.perturb_speed(1.1, affix_id=False)
assert cut_sp.id == premixed.id
# ########################################
# ############ PERTURB TEMPO #############
# ########################################
def test_cut_perturb_tempo_affix_id_true(cut1):
cut_tp = cut1.perturb_tempo(1.1)
assert cut_tp.id != cut1.id
def test_cut_perturb_tempo_affix_id_false(cut1):
cut_tp = cut1.perturb_tempo(1.1, affix_id=False)
assert cut_tp.id == cut1.id
def test_mixed_cut_perturb_tempo_affix_id_true(cut1):
premixed = cut1.append(cut1)
cut_tp = premixed.perturb_tempo(1.1)
assert cut_tp.id != premixed.id
def test_mixed_cut_perturb_tempo_affix_id_false(cut1):
premixed = cut1.append(cut1)
cut_tp = premixed.perturb_tempo(1.1, affix_id=False)
assert cut_tp.id == premixed.id
# ########################################
# ########### PERTURB VOLUME #############
# ########################################
def test_cut_perturb_volume_affix_id_true(cut1):
cut_vp = cut1.perturb_volume(1.1)
assert cut_vp.id != cut1.id
def test_cut_perturb_volume_affix_id_false(cut1):
cut_vp = cut1.perturb_volume(1.1, affix_id=False)
assert cut_vp.id == cut1.id
def test_mixed_cut_perturb_volume_affix_id_true(cut1):
premixed = cut1.append(cut1)
cut_vp = premixed.perturb_volume(1.1)
assert cut_vp.id != premixed.id
def test_mixed_cut_perturb_volume_affix_id_false(cut1):
premixed = cut1.append(cut1)
cut_vp = premixed.perturb_volume(1.1, affix_id=False)
assert cut_vp.id == premixed.id
# ########################################
# ############## RESAMPLE ################
# ########################################
def test_cut_resample_affix_id_true(cut1):
cut_rs = cut1.resample(44100, affix_id=True)
assert cut_rs.id != cut1.id
def test_cut_resample_affix_id_false(cut1):
cut_rs = cut1.resample(44100)
assert cut_rs.id == cut1.id
def test_mixed_cut_resample_affix_id_true(cut1):
premixed = cut1.append(cut1)
cut_rs = premixed.resample(44100, affix_id=True)
assert cut_rs.id != premixed.id
def test_mixed_cut_resample_affix_id_false(cut1):
premixed = cut1.append(cut1)
cut_rs = premixed.resample(44100)
assert cut_rs.id == premixed.id
| 28.49569 | 118 | 0.638633 | 897 | 6,611 | 4.426979 | 0.063545 | 0.056409 | 0.049862 | 0.077562 | 0.955931 | 0.952909 | 0.88416 | 0.83178 | 0.692773 | 0.609922 | 0 | 0.033664 | 0.146271 | 6,611 | 231 | 119 | 28.619048 | 0.669915 | 0.034034 | 0 | 0.310078 | 0 | 0 | 0.022127 | 0 | 0 | 0 | 0 | 0 | 0.341085 | 1 | 0.248062 | false | 0 | 0.007752 | 0 | 0.255814 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
cad1c4d020698b01d568f21867033469eb800799 | 5,278 | py | Python | rules.py | Wichy76/wichess | d8f4112fee01416565b7d8828c8e79eeb8cd3947 | [
"MIT"
] | null | null | null | rules.py | Wichy76/wichess | d8f4112fee01416565b7d8828c8e79eeb8cd3947 | [
"MIT"
] | null | null | null | rules.py | Wichy76/wichess | d8f4112fee01416565b7d8828c8e79eeb8cd3947 | [
"MIT"
] | null | null | null | import pieces
import itertools
def valid_pawn_movement(selected_piece, new_pos, current_turn):
piece, old_x, old_y = selected_piece
new_x, new_y = new_pos
if piece != (current_turn, pieces.PAWN):
return False
else:
if current_turn == pieces.WHITE:
if old_x // 100 == new_x // 100 and old_y // 100 == (new_y // 100) + 1:
return True
elif old_x // 100 == new_x // 100 and old_y // 100 == 6 and (new_y // 100) == 4:
return True
if current_turn == pieces.BLACK:
if old_x // 100 == new_x // 100 and old_y // 100 == (new_y // 100) - 1:
return True
elif old_x // 100 == new_x // 100 and old_y // 100 == 1 and (new_y // 100) == 3:
return True
return False
def valid_knight_movement(selected_piece, new_pos, current_turn):
possibles = [(-1, -2), (-1, 2), (1, -2), (1, 2), (-2, -1), (-2, 1), (2, -1), (2, 1)]
piece, old_x, old_y = selected_piece
new_x, new_y = new_pos
if piece != (current_turn, pieces.KNIGHT):
return False
else:
for posi in possibles:
if old_x // 100 == (new_x // 100) + int(posi[0]) and old_y // 100 == (new_y // 100) + int(posi[1]):
return True
return False
def valid_king_movement(selected_piece, new_pos, current_turn):
possibles = list(itertools.product([-1, 0, 1], [-1, 0, 1]))
piece, old_x, old_y = selected_piece
new_x, new_y = new_pos
if piece != (current_turn, pieces.KING):
return False
else:
for posi in possibles:
if old_x // 100 == (new_x // 100) + int(posi[0]) and old_y // 100 == (new_y // 100) + int(posi[1]):
return True
return False
def is_horizontal_or_vertical_move(new_x, new_y, old_x, old_y):
if old_x // 100 == (new_x // 100) and old_y // 100 != (new_y // 100) or old_x // 100 != (
new_x // 100) and old_y // 100 == (new_y // 100):
return True
def valid_rook_movement(selected_piece, new_pos, current_turn, board):
piece, old_x, old_y = selected_piece
new_x, new_y = new_pos
if piece != (current_turn, pieces.ROOK):
return False
else:
if is_horizontal_or_vertical_move(new_x, new_y, old_x, old_y):
if have_between_own_pieces_horizontal(new_pos, board, selected_piece) or have_between_own_pieces_vertical(
new_pos, board, selected_piece):
return False
return True
return False
def is_diagonal_move(new_x, new_y, old_x, old_y):
if old_x // 100 - (old_y // 100) == new_x // 100 - (new_y // 100) or old_x // 100 + (
old_y // 100) == new_x // 100 + (new_y // 100):
return True
def valid_bishop_movement(selected_piece, new_pos, current_turn, board):
piece, old_x, old_y = selected_piece
new_x, new_y = new_pos
if piece != (current_turn, pieces.BISHOP):
return False
else:
if is_diagonal_move(new_x, new_y, old_x, old_y):
if have_between_own_pieces_diagonal(new_pos, board, selected_piece) :
return False
return True
return False
def valid_queen_movement(selected_piece, new_pos, current_turn, board):
piece, old_x, old_y = selected_piece
new_x, new_y = new_pos
if piece != (current_turn, pieces.QUEEN):
return False
else:
if is_diagonal_move(new_x, new_y, old_x, old_y):
if have_between_own_pieces_diagonal(new_pos, board, selected_piece) :
return False
return True
if is_horizontal_or_vertical_move(new_x, new_y, old_x, old_y):
if have_between_own_pieces_horizontal(new_pos, board, selected_piece) or have_between_own_pieces_vertical(
new_pos, board, selected_piece):
return False
return True
return False
def have_between_own_pieces_vertical(new_pos, board, selected_piece):
piece, old_x, old_y = selected_piece
new_x, new_y = new_pos
min_y = min(new_y // 100, old_y // 100) + 1
max_y = max(new_y // 100, old_y // 100)
for y in range(min_y, max_y):
if board[old_x // 100][y][0] == piece[0]:
return True
return False
def have_between_own_pieces_horizontal(new_pos, board, selected_piece):
piece, old_x, old_y = selected_piece
new_x, new_y = new_pos
min_x = min(new_x // 100, old_x // 100) + 1
max_x = max(new_x // 100, old_x // 100)
for x in range(min_x, max_x):
if board[x][old_y // 100][0] == piece[0]:
return True
return False
def have_between_own_pieces_diagonal(new_pos, board, selected_piece):
piece, old_x, old_y = selected_piece
new_x, new_y = new_pos
min_x = min(new_x // 100, old_x // 100) + 1
max_x = max(new_x // 100, old_x // 100)
if new_y // 100 + new_x // 100 == old_y // 100 + old_x // 100:
for x in range(min_x, max_x):
y = (old_y // 100 + old_x // 100) - x
if board[x][y][0] == piece[0]:
return True
else:
for x in range(min_x, max_x):
y = (old_y // 100 - old_x // 100) + x
if board[x][y][0] == piece[0]:
return True
return False
| 33.833333 | 118 | 0.59208 | 827 | 5,278 | 3.459492 | 0.068924 | 0.046138 | 0.044041 | 0.041943 | 0.900734 | 0.891646 | 0.86683 | 0.848305 | 0.797274 | 0.797274 | 0 | 0.061361 | 0.292914 | 5,278 | 155 | 119 | 34.051613 | 0.705252 | 0 | 0 | 0.680328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090164 | false | 0 | 0.016393 | 0 | 0.393443 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.