hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e5cebb394da49e8ed887f3594adea7b782eab267
| 210
|
py
|
Python
|
cmf_rateslib/products/base_product.py
|
lantalex/cmf_rateslib
|
efefe45bfc349a18a4b318f0d524744e0140e155
|
[
"MIT"
] | 3
|
2021-11-12T16:14:29.000Z
|
2021-12-08T17:44:35.000Z
|
cmf_rateslib/products/base_product.py
|
lantalex/cmf_rateslib
|
efefe45bfc349a18a4b318f0d524744e0140e155
|
[
"MIT"
] | null | null | null |
cmf_rateslib/products/base_product.py
|
lantalex/cmf_rateslib
|
efefe45bfc349a18a4b318f0d524744e0140e155
|
[
"MIT"
] | 13
|
2021-11-09T17:53:51.000Z
|
2021-12-13T11:19:12.000Z
|
class BaseProduct(object):
params: dict = {}
def __init__(self):
pass
def get_cashflows(self, *args, **kwargs):
return None
def pv(self, *args, **kwargs):
return 0
| 14
| 45
| 0.557143
| 24
| 210
| 4.666667
| 0.708333
| 0.142857
| 0.25
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006993
| 0.319048
| 210
| 14
| 46
| 15
| 0.776224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0.125
| 0
| 0.25
| 0.875
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 7
|
e5fd1c3e2a9590f8542aa241a24dca9e82772723
| 2,621
|
py
|
Python
|
tests/clpy_tests/opencl_tests/ultima_tests/test_ultima_carray.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 142
|
2018-06-07T07:43:10.000Z
|
2021-10-30T21:06:32.000Z
|
tests/clpy_tests/opencl_tests/ultima_tests/test_ultima_carray.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 282
|
2018-06-07T08:35:03.000Z
|
2021-03-31T03:14:32.000Z
|
tests/clpy_tests/opencl_tests/ultima_tests/test_ultima_carray.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 19
|
2018-06-19T11:07:53.000Z
|
2021-05-13T20:57:04.000Z
|
# flake8: noqa
# TODO(vorj): When we will meet flake8 3.7.0+,
# we should ignore only W291 for whole file
# using --per-file-ignores .
import clpy
import unittest
class TestUltimaCArray(unittest.TestCase):
def test_carray_argument_mutation(self):
x = clpy.backend.ultima.exec_ultima('', '#include <cupy/carray.hpp>') + '''
void f(__global int* const __restrict__ arr, const CArray_2 arr_info)
{
}
'''[1:]
y = clpy.backend.ultima.exec_ultima(
'''
void f(CArray<int, 2> arr){}
''',
'#include <cupy/carray.hpp>')
self.maxDiff = None
self.assertEqual(x, y)
def test_carray_member_function(self):
x = clpy.backend.ultima.exec_ultima('', '#include <cupy/carray.hpp>') + '''
void f(__global int* const __restrict__ arr, const CArray_2 arr_info)
{
((const size_t)arr_info.size_);
((const size_t*)arr_info.shape_);
((const size_t*)arr_info.strides_);
}
'''[1:]
y = clpy.backend.ultima.exec_ultima(
'''
void f(CArray<int, 2> arr){
arr.size();
arr.shape();
arr.strides();
}
''',
'#include <cupy/carray.hpp>')
self.maxDiff = None
self.assertEqual(x, y)
def test_carray_0_member_function(self):
x = clpy.backend.ultima.exec_ultima('', '#include <cupy/carray.hpp>') + '''
void f(__global int* const __restrict__ arr, const CArray_0 arr_info)
{
((const size_t)arr_info.size_);
((const size_t*)NULL);
((const size_t*)NULL);
}
'''[1:]
y = clpy.backend.ultima.exec_ultima(
'''
void f(CArray<int, 0> arr){
arr.size();
arr.shape();
arr.strides();
}
''',
'#include <cupy/carray.hpp>')
self.maxDiff = None
self.assertEqual(x, y)
def test_carray_1_member_function(self):
x = clpy.backend.ultima.exec_ultima('', '#include <cupy/carray.hpp>') + '''
void f(__global int* const __restrict__ arr, const CArray_1 arr_info)
{
((const size_t)arr_info.size_);
((const size_t*)&arr_info.shape_);
((const size_t*)&arr_info.strides_);
}
'''[1:]
y = clpy.backend.ultima.exec_ultima(
'''
void f(CArray<int, 1> arr){
arr.size();
arr.shape();
arr.strides();
}
''',
'#include <cupy/carray.hpp>')
self.maxDiff = None
self.assertEqual(x, y)
if __name__ == "__main__":
unittest.main()
| 28.48913
| 83
| 0.546356
| 315
| 2,621
| 4.27619
| 0.203175
| 0.057164
| 0.066815
| 0.124722
| 0.82925
| 0.82925
| 0.82925
| 0.82925
| 0.82925
| 0.82925
| 0
| 0.012028
| 0.302175
| 2,621
| 91
| 84
| 28.802198
| 0.72444
| 0.058756
| 0
| 0.534483
| 0
| 0
| 0.43366
| 0.090346
| 0
| 0
| 0
| 0.010989
| 0.068966
| 1
| 0.068966
| false
| 0
| 0.034483
| 0
| 0.12069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
00556e553743d1430d37baf919cb2e6c5d00dd18
| 23,470
|
py
|
Python
|
tests/e2e/basic/performance_tests/wifi_capacity_test/wpa2_personal/test_bridge_mode.py
|
DYeag/wlan-testing
|
81e879d04ea3c6a55d14a330d461d8914507e3b2
|
[
"BSD-3-Clause"
] | 7
|
2020-08-19T16:45:46.000Z
|
2022-02-10T09:55:22.000Z
|
tests/e2e/basic/performance_tests/wifi_capacity_test/wpa2_personal/test_bridge_mode.py
|
DYeag/wlan-testing
|
81e879d04ea3c6a55d14a330d461d8914507e3b2
|
[
"BSD-3-Clause"
] | 47
|
2020-12-20T16:06:03.000Z
|
2022-03-23T03:01:22.000Z
|
tests/e2e/basic/performance_tests/wifi_capacity_test/wpa2_personal/test_bridge_mode.py
|
DYeag/wlan-testing
|
81e879d04ea3c6a55d14a330d461d8914507e3b2
|
[
"BSD-3-Clause"
] | 9
|
2021-02-04T22:32:06.000Z
|
2021-12-14T17:45:51.000Z
|
"""
Performance Test: Wifi Capacity Test : BRIDGE Mode
pytest -m "wifi_capacity_test and BRIDGE"
"""
import os
import pytest
import allure
pytestmark = [pytest.mark.performance, pytest.mark.bridge]
# """pytest.mark.usefixtures("setup_test_run")"""]
setup_params_general_dual_band = {
"mode": "BRIDGE",
"ssid_modes": {
"wpa2_personal": [
{"ssid_name": "ssid_wpa2_dual_band", "appliedRadios": ["2G", "5G"], "security_key": "something"}
]
},
"rf": {},
"radius": False
}
@allure.feature("BRIDGE MODE CLIENT CONNECTIVITY")
@pytest.mark.parametrize(
'setup_profiles',
[setup_params_general_dual_band],
indirect=True,
scope="class"
)
@pytest.mark.usefixtures("setup_profiles")
@pytest.mark.bridge
@pytest.mark.twog
@pytest.mark.fiveg
@pytest.mark.dual_band
@pytest.mark.wpa2_personal
@pytest.mark.wifi_capacity_test
class TestWifiCapacityBRIDGEModeDualBand(object):
""" Wifi Capacity Test BRIDGE mode
pytest -m "wifi_capacity_test and BRIDGE"
"""
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-3926", name="WIFI-3926")
@pytest.mark.tcp_download
def test_client_wpa2_BRIDGE_tcp_dl(self, get_vif_state, lf_tools, setup_profiles,
lf_test, station_names_twog, create_lanforge_chamberview_dut,
get_configuration):
""" Wifi Capacity Test BRIDGE mode
pytest -m "wifi_capacity_test and BRIDGE and wpa2_personal and twog"
"""
lf_tools.reset_scenario()
profile_data = setup_params_general_dual_band["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
mode = "BRIDGE"
vlan = 1
get_vif_state.append(ssid_name)
if ssid_name not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
lf_tools.add_stations(band="2G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.add_stations(band="5G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
# lf_tools.add_stations(band="ax", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.Chamber_View()
influx_tags = ["tcp", "download", "2.4G-5G Combined"]
wct_obj = lf_test.wifi_capacity(instance_name="test_client_wpa2_BRIDGE_tcp_dl", mode=mode, vlan_id=vlan,
download_rate="1Gbps", batch_size="1,5,10,20,40,64,128,256",
influx_tags=influx_tags,
upload_rate="0", protocol="TCP-IPv4", duration="60000")
report_name = wct_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
lf_tools.attach_report_graphs(report_name=report_name)
print("Test Completed... Cleaning up Stations")
assert True
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-3927", name="WIFI-3927")
@pytest.mark.udp_download
def test_client_wpa2_BRIDGE_udp_dl(self, get_vif_state, lf_tools,
lf_test, station_names_twog, create_lanforge_chamberview_dut,
get_configuration):
""" Wifi Capacity Test BRIDGE mode
pytest -m "wifi_capacity_test and BRIDGE and wpa2_personal and twog"
"""
lf_tools.reset_scenario()
profile_data = setup_params_general_dual_band["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
mode = "BRIDGE"
vlan = 1
get_vif_state.append(ssid_name)
if ssid_name not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
lf_tools.add_stations(band="2G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.add_stations(band="5G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
# lf_tools.add_stations(band="ax", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.Chamber_View()
influx_tags = ["udp", "download", "2.4G-5G Combined"]
wct_obj = lf_test.wifi_capacity(instance_name="test_client_wpa2_BRIDGE_udp_dl", mode=mode, vlan_id=vlan,
download_rate="1Gbps", batch_size="1,5,10,20,40,64,128,256",
influx_tags=influx_tags,
upload_rate="0", protocol="UDP-IPv4", duration="60000")
report_name = wct_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
lf_tools.attach_report_graphs(report_name=report_name)
print("Test Completed... Cleaning up Stations")
assert True
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-3932", name="WIFI-3932")
@pytest.mark.tcp_bidirectional
def test_client_wpa2_BRIDGE_tcp_bidirectional(self, get_vif_state, lf_tools,
lf_test, station_names_twog, create_lanforge_chamberview_dut,
get_configuration):
""" Wifi Capacity Test BRIDGE mode
pytest -m "wifi_capacity_test and BRIDGE and wpa2_personal and twog"
"""
lf_tools.reset_scenario()
profile_data = setup_params_general_dual_band["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
mode = "BRIDGE"
vlan = 1
if ssid_name not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
lf_tools.add_stations(band="2G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.add_stations(band="5G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
# lf_tools.add_stations(band="ax", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.Chamber_View()
influx_tags = ["tcp", "bidirectional", "2.4G-5G Combined"]
wct_obj = lf_test.wifi_capacity(instance_name="test_client_wpa2_BRIDGE_tcp_bi", mode=mode, vlan_id=vlan,
download_rate="1Gbps", batch_size="1,5,10,20,40,64,128,256",
influx_tags=influx_tags,
upload_rate="1Gbps", protocol="TCP-IPv4", duration="60000")
report_name = wct_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
lf_tools.attach_report_graphs(report_name=report_name)
print("Test Completed... Cleaning up Stations")
assert True
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-3933", name="WIFI-3933")
@pytest.mark.udp_bidirectional
def test_client_wpa2_BRIDGE_udp_bidirectional(self, get_vif_state, lf_tools,
lf_test, station_names_twog, create_lanforge_chamberview_dut,
get_configuration):
""" Wifi Capacity Test BRIDGE mode
pytest -m "wifi_capacity_test and BRIDGE and wpa2_personal and twog"
"""
lf_tools.reset_scenario()
profile_data = setup_params_general_dual_band["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
mode = "BRIDGE"
vlan = 1
if ssid_name not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
lf_tools.add_stations(band="2G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.add_stations(band="5G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
# lf_tools.add_stations(band="ax", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.Chamber_View()
influx_tags = ["udp", "bidirectional", "2.4G-5G Combined"]
wct_obj = lf_test.wifi_capacity(instance_name="test_client_wpa2_BRIDGE_udp_bi", mode=mode, vlan_id=vlan,
download_rate="1Gbps", batch_size="1,5,10,20,40,64,128,256",
influx_tags=influx_tags,
upload_rate="1Gbps", protocol="UDP-IPv4", duration="60000")
report_name = wct_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
lf_tools.attach_report_graphs(report_name=report_name)
print("Test Completed... Cleaning up Stations")
assert True
setup_params_general_2G = {
"mode": "BRIDGE",
"ssid_modes": {
"wpa2_personal": [
{"ssid_name": "ssid_wpa2_2g", "appliedRadios": ["2G"], "security_key": "something"}
]
},
"rf": {},
"radius": False
}
@allure.feature("BRIDGE MODE CLIENT CONNECTIVITY")
@pytest.mark.parametrize(
'setup_profiles',
[setup_params_general_2G],
indirect=True,
scope="class"
)
@pytest.mark.usefixtures("setup_profiles")
@pytest.mark.wpa2_personal
@pytest.mark.twog
@pytest.mark.twog_band
class TestWifiCapacityBRIDGEMode2G(object):
""" Wifi Capacity Test BRIDGE mode
pytest -m "wifi_capacity_test and BRIDGE"
"""
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-3928", name="WIFI-3928")
@pytest.mark.tcp_download
def test_client_wpa2_BRIDGE_tcp_dl(self, get_vif_state, lf_tools, setup_profiles,
lf_test, station_names_twog, create_lanforge_chamberview_dut,
get_configuration):
""" Wifi Capacity Test BRIDGE mode
pytest -m "wifi_capacity_test and BRIDGE and wpa2_personal and twog"
"""
profile_data = setup_params_general_2G["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
mode = "BRIDGE"
vlan = 1
if ssid_name not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
lf_tools.add_stations(band="2G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.add_stations(band="ax", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.Chamber_View()
wct_obj = lf_test.wifi_capacity(instance_name="test_client_wpa2_BRIDGE_tcp_dl", mode=mode, vlan_id=vlan,
download_rate="1Gbps",
upload_rate="0", protocol="TCP-IPv4", duration="60000")
report_name = wct_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
lf_tools.attach_report_graphs(report_name=report_name)
print("Test Completed... Cleaning up Stations")
assert True
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-3930", name="WIFI-3930")
@pytest.mark.udp_download
def test_client_wpa2_BRIDGE_udp_dl(self, get_vif_state, lf_tools,
lf_test, station_names_twog, create_lanforge_chamberview_dut,
get_configuration):
""" Wifi Capacity Test BRIDGE mode
pytest -m "wifi_capacity_test and BRIDGE and wpa2_personal and twog"
"""
profile_data = setup_params_general_2G["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
mode = "BRIDGE"
vlan = 1
if ssid_name not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
lf_tools.add_stations(band="2G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.add_stations(band="ax", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.Chamber_View()
wct_obj = lf_test.wifi_capacity(instance_name="test_client_wpa2_BRIDGE_udp_dl", mode=mode, vlan_id=vlan,
download_rate="1Gbps",
upload_rate="0", protocol="UDP-IPv4", duration="60000")
report_name = wct_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
lf_tools.attach_report_graphs(report_name=report_name)
print("Test Completed... Cleaning up Stations")
assert True
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-3934", name="WIFI-3934")
@pytest.mark.tcp_bidirectional
def test_client_wpa2_BRIDGE_tcp_bidirectional(self, get_vif_state, lf_tools,
lf_test, station_names_twog, create_lanforge_chamberview_dut,
get_configuration):
""" Wifi Capacity Test BRIDGE mode
pytest -m "wifi_capacity_test and BRIDGE and wpa2_personal and twog"
"""
profile_data = setup_params_general_2G["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
mode = "BRIDGE"
vlan = 1
if ssid_name not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
lf_tools.add_stations(band="2G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.add_stations(band="ax", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.Chamber_View()
wct_obj = lf_test.wifi_capacity(instance_name="test_client_wpa2_BRIDGE_tcp_bi", mode=mode, vlan_id=vlan,
download_rate="1Gbps",
upload_rate="1Gbps", protocol="TCP-IPv4", duration="60000")
report_name = wct_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
lf_tools.attach_report_graphs(report_name=report_name)
print("Test Completed... Cleaning up Stations")
assert True
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-3935", name="WIFI-3935")
@pytest.mark.udp_bidirectional
def test_client_wpa2_BRIDGE_udp_bidirectional(self, get_vif_state, lf_tools,
lf_test, station_names_twog, create_lanforge_chamberview_dut,
get_configuration):
""" Wifi Capacity Test BRIDGE mode
pytest -m "wifi_capacity_test and BRIDGE and wpa2_personal and twog"
"""
profile_data = setup_params_general_2G["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
mode = "BRIDGE"
vlan = 1
if ssid_name not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
lf_tools.add_stations(band="2G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.add_stations(band="ax", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.Chamber_View()
wct_obj = lf_test.wifi_capacity(instance_name="test_client_wpa2_BRIDGE_udp_bi", mode=mode, vlan_id=vlan,
download_rate="1Gbps",
upload_rate="1Gbps", protocol="UDP-IPv4", duration="60000")
report_name = wct_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
lf_tools.attach_report_graphs(report_name=report_name)
print("Test Completed... Cleaning up Stations")
assert True
setup_params_general_5G = {
"mode": "BRIDGE",
"ssid_modes": {
"wpa2_personal": [
{"ssid_name": "ssid_wpa2_5g", "appliedRadios": ["5G"], "security_key": "something"}
]
},
"rf": {},
"radius": False
}
@allure.feature("BRIDGE MODE CLIENT CONNECTIVITY")
@pytest.mark.parametrize(
'setup_profiles',
[setup_params_general_5G],
indirect=True,
scope="class"
)
@pytest.mark.usefixtures("setup_profiles")
@pytest.mark.wpa2_personal
@pytest.mark.fiveg
@pytest.mark.fiveg_band
class TestWifiCapacityBRIDGEMode5G(object):
""" Wifi Capacity Test BRIDGE mode
pytest -m "wifi_capacity_test and BRIDGE"
"""
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-3929", name="WIFI-3929")
@pytest.mark.tcp_download
def test_client_wpa2_BRIDGE_tcp_dl(self, get_vif_state, lf_tools, setup_profiles,
lf_test, station_names_twog, create_lanforge_chamberview_dut,
get_configuration):
""" Wifi Capacity Test BRIDGE mode
pytest -m "wifi_capacity_test and BRIDGE and wpa2_personal and twog"
"""
profile_data = setup_params_general_5G["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
mode = "BRIDGE"
vlan = 1
if ssid_name not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
lf_tools.add_stations(band="5G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.add_stations(band="ax", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.Chamber_View()
wct_obj = lf_test.wifi_capacity(instance_name="test_client_wpa2_BRIDGE_tcp_dl", mode=mode, vlan_id=vlan,
download_rate="1Gbps",
upload_rate="0", protocol="TCP-IPv4", duration="60000")
report_name = wct_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
lf_tools.attach_report_graphs(report_name=report_name)
print("Test Completed... Cleaning up Stations")
assert True
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-3931", name="WIFI-3931")
@pytest.mark.udp_download
def test_client_wpa2_BRIDGE_udp_dl(self, get_vif_state, lf_tools,
lf_test, station_names_twog, create_lanforge_chamberview_dut,
get_configuration):
""" Wifi Capacity Test BRIDGE mode
pytest -m "wifi_capacity_test and BRIDGE and wpa2_personal and twog"
"""
profile_data = setup_params_general_5G["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
mode = "BRIDGE"
vlan = 1
if ssid_name not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
lf_tools.add_stations(band="5G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.add_stations(band="ax", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.Chamber_View()
wct_obj = lf_test.wifi_capacity(instance_name="test_client_wpa2_BRIDGE_udp_dl", mode=mode, vlan_id=vlan,
download_rate="1Gbps",
upload_rate="0", protocol="UDP-IPv4", duration="60000")
report_name = wct_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
lf_tools.attach_report_graphs(report_name=report_name)
print("Test Completed... Cleaning up Stations")
assert True
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-3936", name="WIFI-3936")
@pytest.mark.tcp_bidirectional
def test_client_wpa2_BRIDGE_tcp_bidirectional(self, get_vif_state, lf_tools,
lf_test, station_names_twog, create_lanforge_chamberview_dut,
get_configuration):
""" Wifi Capacity Test BRIDGE mode
pytest -m "wifi_capacity_test and BRIDGE and wpa2_personal and twog"
"""
profile_data = setup_params_general_5G["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
mode = "BRIDGE"
vlan = 1
if ssid_name not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
lf_tools.add_stations(band="5G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.add_stations(band="ax", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.Chamber_View()
wct_obj = lf_test.wifi_capacity(instance_name="test_client_wpa2_BRIDGE_tcp_bi", mode=mode, vlan_id=vlan,
download_rate="1Gbps",
upload_rate="1Gbps", protocol="TCP-IPv4", duration="60000")
report_name = wct_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
lf_tools.attach_report_graphs(report_name=report_name)
print("Test Completed... Cleaning up Stations")
assert True
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-3937", name="WIFI-3937")
@pytest.mark.udp_bidirectional
def test_client_wpa2_BRIDGE_udp_bidirectional(self, get_vif_state, lf_tools,
lf_test, station_names_twog, create_lanforge_chamberview_dut,
get_configuration):
""" Wifi Capacity Test BRIDGE mode
pytest -m "wifi_capacity_test and BRIDGE and wpa2_personal and twog"
"""
profile_data = setup_params_general_5G["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
mode = "BRIDGE"
vlan = 1
if ssid_name not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
lf_tools.add_stations(band="5G", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.add_stations(band="ax", num_stations="max", dut=lf_tools.dut_name, ssid_name=ssid_name)
lf_tools.Chamber_View()
wct_obj = lf_test.wifi_capacity(instance_name="test_client_wpa2_BRIDGE_udp_bi", mode=mode, vlan_id=vlan,
download_rate="1Gbps",
upload_rate="1Gbps", protocol="UDP-IPv4", duration="60000")
report_name = wct_obj.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
lf_tools.attach_report_graphs(report_name=report_name)
print("Test Completed... Cleaning up Stations")
assert True
| 51.356674
| 112
| 0.633404
| 2,973
| 23,470
| 4.678776
| 0.049781
| 0.055787
| 0.048311
| 0.036233
| 0.963911
| 0.954278
| 0.951977
| 0.951977
| 0.951977
| 0.951977
| 0
| 0.023942
| 0.250788
| 23,470
| 456
| 113
| 51.469298
| 0.767118
| 0.083042
| 0
| 0.860399
| 0
| 0
| 0.196551
| 0.021413
| 0
| 0
| 0
| 0
| 0.034188
| 1
| 0.034188
| false
| 0
| 0.008547
| 0
| 0.051282
| 0.034188
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
00857be160580391324499add35f7d9c03e38fb6
| 604
|
py
|
Python
|
spine_json_lib/data/spine_exceptions.py
|
ivan-ah/spine-json-lib
|
1ea8460127f005d57af56090d2a48e6039437306
|
[
"MIT"
] | 6
|
2019-12-02T15:25:57.000Z
|
2021-11-02T04:14:19.000Z
|
spine_json_lib/data/spine_exceptions.py
|
ivan-ah/spine-json-lib
|
1ea8460127f005d57af56090d2a48e6039437306
|
[
"MIT"
] | 3
|
2020-03-20T11:09:22.000Z
|
2022-02-18T10:07:26.000Z
|
spine_json_lib/data/spine_exceptions.py
|
ivan-ah/spine-json-lib
|
1ea8460127f005d57af56090d2a48e6039437306
|
[
"MIT"
] | 2
|
2019-12-02T14:56:50.000Z
|
2020-02-24T07:53:20.000Z
|
class SpineParsingException(Exception):
def __init__(self, message, code_error=None, *args, **kwargs):
self.message = message
self.code_error = code_error
super(SpineParsingException, self).__init__(*args, **kwargs)
def __str__(self):
return str(self.message)
class SpineJsonEditorError(Exception):
def __init__(self, message, code_error=None, *args, **kwargs):
self.message = message
self.code_error = code_error
super(SpineJsonEditorError, self).__init__(*args, **kwargs)
def __str__(self):
return str(self.message)
| 28.761905
| 68
| 0.675497
| 66
| 604
| 5.727273
| 0.242424
| 0.174603
| 0.084656
| 0.10582
| 0.756614
| 0.756614
| 0.756614
| 0.756614
| 0.756614
| 0.756614
| 0
| 0
| 0.211921
| 604
| 20
| 69
| 30.2
| 0.794118
| 0
| 0
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.142857
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
00a195019fe8d6cd83c8bc33244c586ebbbc306b
| 2,635
|
py
|
Python
|
exp_configs/__init__.py
|
dongzhuoyao/embedding-propagation
|
1f14947bbd8be8a9950e7c4093fbfed0536809b9
|
[
"Apache-2.0"
] | null | null | null |
exp_configs/__init__.py
|
dongzhuoyao/embedding-propagation
|
1f14947bbd8be8a9950e7c4093fbfed0536809b9
|
[
"Apache-2.0"
] | null | null | null |
exp_configs/__init__.py
|
dongzhuoyao/embedding-propagation
|
1f14947bbd8be8a9950e7c4093fbfed0536809b9
|
[
"Apache-2.0"
] | null | null | null |
# Import every experiment-config module in this package and merge their
# individual EXP_GROUPS dictionaries into one registry.
from . import pretrain_exps, ssl_exps
from . import pretrain_miniin_wrn_exps
from . import pretrain_miniin_wrn50_2_exps
from . import pretrain_miniin_resnet12_exps
from . import pretrain_miniin_resnet50_exps
from . import pretrain_miniin_densenet121_exps
from . import pretrain_tieredin_wrn_exps
from . import finetune_exps
from . import finetune_miniin_wrn_exps
from . import finetune_miniin_resnet12_exps
from . import finetune_tieredin_wrn_exps
from . import finetune_miniin_wrn50_2_exps
from . import ssl_large_miniin_wrn_exps
from . import ssl_large_inductive_miniin_wrn_exps
from . import ssl_large_inductive_tieredin_wrn_exps
from . import ssl_large_inductive_miniin_cars_wrn_exps
from . import ssl_large_inductive_miniin_cub_wrn_exps
from . import ssl_large_inductive_miniin_places_wrn_exps
from . import ssl_large_inductive_miniin_plantae_wrn_exps
from . import ssl_large_inductive_tieredin_cars_wrn_exps
from . import ssl_large_inductive_tieredin_cub_wrn_exps
from . import ssl_large_inductive_tieredin_places_wrn_exps
from . import ssl_large_inductive_tieredin_plantae_wrn_exps

# Start from a fresh dict and update() it with every module's groups.
# Bug fix: the original code rebound EXP_GROUPS to pretrain_exps.EXP_GROUPS
# itself (the initial `EXP_GROUPS = {}` was dead), so all subsequent
# update() calls mutated pretrain_exps' own dictionary in place.
EXP_GROUPS = {}
EXP_GROUPS.update(pretrain_exps.EXP_GROUPS)
EXP_GROUPS.update(ssl_exps.EXP_GROUPS)
EXP_GROUPS.update(pretrain_miniin_wrn_exps.EXP_GROUPS)
EXP_GROUPS.update(pretrain_miniin_wrn50_2_exps.EXP_GROUPS)
EXP_GROUPS.update(pretrain_miniin_resnet12_exps.EXP_GROUPS)
EXP_GROUPS.update(pretrain_miniin_resnet50_exps.EXP_GROUPS)
EXP_GROUPS.update(pretrain_miniin_densenet121_exps.EXP_GROUPS)
EXP_GROUPS.update(pretrain_tieredin_wrn_exps.EXP_GROUPS)
EXP_GROUPS.update(finetune_exps.EXP_GROUPS)
EXP_GROUPS.update(finetune_miniin_wrn_exps.EXP_GROUPS)
EXP_GROUPS.update(finetune_miniin_resnet12_exps.EXP_GROUPS)
EXP_GROUPS.update(finetune_miniin_wrn50_2_exps.EXP_GROUPS)
EXP_GROUPS.update(finetune_tieredin_wrn_exps.EXP_GROUPS)
EXP_GROUPS.update(ssl_large_miniin_wrn_exps.EXP_GROUPS)
EXP_GROUPS.update(ssl_large_inductive_miniin_wrn_exps.EXP_GROUPS)
EXP_GROUPS.update(ssl_large_inductive_tieredin_wrn_exps.EXP_GROUPS)
EXP_GROUPS.update(ssl_large_inductive_miniin_cars_wrn_exps.EXP_GROUPS)
EXP_GROUPS.update(ssl_large_inductive_miniin_cub_wrn_exps.EXP_GROUPS)
EXP_GROUPS.update(ssl_large_inductive_miniin_places_wrn_exps.EXP_GROUPS)
EXP_GROUPS.update(ssl_large_inductive_miniin_plantae_wrn_exps.EXP_GROUPS)
EXP_GROUPS.update(ssl_large_inductive_tieredin_cars_wrn_exps.EXP_GROUPS)
EXP_GROUPS.update(ssl_large_inductive_tieredin_cub_wrn_exps.EXP_GROUPS)
EXP_GROUPS.update(ssl_large_inductive_tieredin_places_wrn_exps.EXP_GROUPS)
EXP_GROUPS.update(ssl_large_inductive_tieredin_plantae_wrn_exps.EXP_GROUPS)
| 36.597222
| 75
| 0.897533
| 413
| 2,635
| 5.135593
| 0.05569
| 0.207921
| 0.15323
| 0.181047
| 0.942008
| 0.8529
| 0.78265
| 0.736445
| 0.367751
| 0.296558
| 0
| 0.012063
| 0.056167
| 2,635
| 71
| 76
| 37.112676
| 0.840772
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.489796
| 0
| 0.489796
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
00ae9c03e4d6d1585e5b80aafb9680daa464fc7f
| 20,925
|
py
|
Python
|
spytest/spytest/prompts.py
|
mykolaf/sonic-mgmt
|
de77268526173c5e3a345f3f3703b56eb40c5eed
|
[
"Apache-2.0"
] | 1
|
2021-09-15T17:09:13.000Z
|
2021-09-15T17:09:13.000Z
|
spytest/spytest/prompts.py
|
mykolaf/sonic-mgmt
|
de77268526173c5e3a345f3f3703b56eb40c5eed
|
[
"Apache-2.0"
] | 1
|
2020-02-05T16:51:53.000Z
|
2020-02-05T16:51:53.000Z
|
spytest/spytest/prompts.py
|
mykolaf/sonic-mgmt
|
de77268526173c5e3a345f3f3703b56eb40c5eed
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
import logging
from spytest.dicts import SpyTestDict
from spytest.ordyaml import OrderedYaml
# Directory holding the per-model "<model>_prompts.yaml" definition files,
# resolved relative to this module: ../datastore/prompts.
prompts_root = os.path.join(os.path.dirname(__file__), '..', "datastore", "prompts")
class Prompts(object):
    """
    Load device prompt/mode definitions from a per-model YAML file and
    answer questions about them: which mode a prompt string belongs to,
    which command/prompt moves between modes, and whether extra moves
    through parent modes are required when switching contexts.
    """
    def __init__(self, model=None, logger=None):
        """
        Construction of Prompts object.

        :param model: device model name; "_ssh"/"_terminal" suffixes are
            stripped and it defaults to "sonic" when falsy. Selects the
            "<model>_prompts.yaml" file under ``prompts_root``.
        :param logger: optional logger; falls back to the root logger.
        """
        self.logger = logger or logging.getLogger()
        self.oyaml = None
        model = "sonic" if not model else re.sub("_(ssh|terminal)$", "", model)
        filename = "{}_prompts.yaml".format(model)
        filename = os.path.join(os.path.abspath(prompts_root), filename)
        self.oyaml = OrderedYaml(filename,[])
        prompts_file_data = self.oyaml.get_data() or dict()
        # Each section is optional in the YAML; default to empty containers.
        self.patterns = prompts_file_data.patterns if "patterns" in prompts_file_data else SpyTestDict()
        self.modes = prompts_file_data.modes if "modes" in prompts_file_data else SpyTestDict()
        self.required_args = prompts_file_data.required_args if "required_args" in prompts_file_data else SpyTestDict()
        self.sudo_include_prompts = prompts_file_data.sudo_include_prompts if "sudo_include_prompts" in prompts_file_data else []
        self.do_exclude_prompts = prompts_file_data.do_exclude_prompts if "do_exclude_prompts" in prompts_file_data else []
        # Remembers the last argument values seen per context (router name,
        # ACL name, ...) so mode-change checks can detect a context switch.
        self.stored_values = SpyTestDict()
    def __del__(self):
        pass
    def update_with_hostname(self, hostname):
        """Substitute the device hostname into every pattern that contains
        a literal ``{}`` placeholder."""
        for pattern in self.patterns:
            if re.search(r"{}", self.patterns[pattern]):
                #print("Matched Pattern: '{}' : '{}' : '{}'".format(pattern, self.patterns[pattern], self.patterns[pattern].format(hostname)))
                self.patterns[pattern] = re.sub(r"{}", hostname, self.patterns[pattern])
    def get_mode_for_prompt(self, prompt):
        """Return the first mode whose pattern matches *prompt* (with
        backslashes stripped), or "unknown-prompt" when none match."""
        prompt2 = prompt.replace("\\", "")
        for mode in self.patterns:
            lpattern = self.patterns[mode]
            if re.search(lpattern, prompt2):
                return mode
        return "unknown-prompt"
    def get_prompt_for_mode(self, mode):
        """Return the prompt pattern registered for *mode*, or the literal
        string "unknown-mode" when the mode is not defined."""
        if mode in self.patterns:
            return self.patterns[mode]
        return "unknown-mode"
    def check_args_for_req_mode(self, mode, **kwargs):
        """Validate that all arguments required to enter *mode* are present
        in ``kwargs``; raise ValueError listing them otherwise.

        ``vtysh-router-config`` and ``vtysh-router-af-config`` have special
        rules (e.g. some routing protocols additionally need "instance").
        """
        missing_args_flag = 0
        args_str = ""
        if mode in self.required_args:
            if mode == "vtysh-router-config":
                if "router" not in kwargs.keys():
                    missing_args_flag = 1
                    args_str = ", ".join(self.required_args[mode])
                elif kwargs["router"] in ["bgp", "eigrp", "isis", "openfabric", "ospf"]:
                    # These protocols take an instance identifier as well.
                    if "instance" not in kwargs.keys():
                        missing_args_flag = 1
                        args_str = ", ".join(self.required_args[mode])
            elif mode == "vtysh-router-af-config" and "addr_family" not in kwargs.keys():
                missing_args_flag = 1
                args_str = ", ".join(self.required_args[mode])
            else:
                # Generic case: every listed argument must be supplied.
                for arg in self.required_args[mode]:
                    if arg not in kwargs.keys():
                        missing_args_flag = 1
                        args_str = ", ".join(self.required_args[mode])
                        break
        if missing_args_flag:
            msg = "{} option(s) must be provided for {}.".format(args_str, mode)
            raise ValueError(msg)
        return
    def check_move_for_parent_of_frommode(self, prompt, mode, **kwargs):
        """Return True when leaving *mode* requires first moving to its
        parent because the target context differs from the current one.

        For named contexts (router, ACL, EVPN, ...) the previously seen
        value is cached in ``self.stored_values``; a different value on
        this call signals a context switch (return True). For interface-
        like modes the current *prompt* itself is inspected for the
        instance suffix instead.
        """
        if mode == "vtysh-intf-config":
            # Interface config always requires going via the parent.
            return True
        if mode == "vtysh-router-config":
            if "router" not in self.stored_values:
                self.stored_values["router"] = kwargs["router"]
                return False
            else:
                if self.stored_values["router"] != kwargs["router"]:
                    self.stored_values["router"] = kwargs["router"]
                    return True
        if mode == "mgmt-ipv4-acl-config":
            if "aclname" not in self.stored_values:
                self.stored_values["aclname"] = kwargs["aclname"]
                return False
            else:
                if self.stored_values["aclname"] != kwargs["aclname"]:
                    self.stored_values["aclname"] = kwargs["aclname"]
                    return True
        if mode == "mgmt-evpn-view":
            if "evpnname" not in self.stored_values:
                self.stored_values["evpnname"] = kwargs["evpnname"]
                return False
            else:
                if self.stored_values["evpnname"] != kwargs["evpnname"]:
                    self.stored_values["evpnname"] = kwargs["evpnname"]
                    return True
        if mode == "mgmt-bfd-peer-view":
            if "peer_ip" not in self.stored_values:
                self.stored_values["peer_ip"] = kwargs["peer_ip"]
                return False
            else:
                if self.stored_values["peer_ip"] != kwargs["peer_ip"]:
                    self.stored_values["peer_ip"] = kwargs["peer_ip"]
                    return True
        if mode == "mgmt-route-map-view":
            # Route-map context is identified by the (name, action, seq)
            # triple; any component change is a context switch.
            if "map_name" not in self.stored_values:
                self.stored_values["map_name"] = kwargs["map_name"]
                self.stored_values["action"] = kwargs["action"]
                self.stored_values["seq_num"] = kwargs["seq_num"]
                return False
            else:
                if self.stored_values["map_name"] != kwargs["map_name"] or \
                        self.stored_values["action"] != kwargs["action"] or \
                        self.stored_values["seq_num"] != kwargs["seq_num"]:
                    self.stored_values["map_name"] = kwargs["map_name"]
                    self.stored_values["action"] = kwargs["action"]
                    self.stored_values["seq_num"] = kwargs["seq_num"]
                    return True
        if mode == "mgmt-link-state-track-view":
            if "track_name" not in self.stored_values:
                self.stored_values["track_name"] = kwargs["track_name"]
                return False
            else:
                if self.stored_values["track_name"] != kwargs["track_name"]:
                    self.stored_values["track_name"] = kwargs["track_name"]
                    return True
        if mode == "mgmt-router-bgp-view":
            if "bgp_instance" not in self.stored_values:
                self.stored_values["bgp_instance"] = kwargs["bgp_instance"]
                self.stored_values["bgp_vrf_name"] = kwargs["bgp_vrf_name"]
                return False
            else:
                if self.stored_values["bgp_instance"] != kwargs["bgp_instance"] or \
                        self.stored_values["bgp_vrf_name"] != kwargs["bgp_vrf_name"]:
                    self.stored_values["bgp_instance"] = kwargs["bgp_instance"]
                    self.stored_values["bgp_vrf_name"] = kwargs["bgp_vrf_name"]
                    return True
        if mode == "mgmt-router-bgp-af-view":
            if "af_type" not in self.stored_values:
                self.stored_values["af_type"] = kwargs["af_type"]
                self.stored_values["af_family"] = kwargs["af_family"]
                return False
            else:
                if self.stored_values["af_type"] != kwargs["af_type"] or \
                        self.stored_values["af_family"] != kwargs["af_family"]:
                    self.stored_values["af_type"] = kwargs["af_type"]
                    self.stored_values["af_family"] = kwargs["af_family"]
                    return True
        if mode == "mgmt-router-bgp-nbr-view":
            if "ip_address" not in self.stored_values:
                self.stored_values["ip_address"] = kwargs["ip_address"]
                return False
            else:
                if self.stored_values["ip_address"] != kwargs["ip_address"]:
                    self.stored_values["ip_address"] = kwargs["ip_address"]
                    return True
        if mode == "mgmt-router-bgp-nbr-af-view":
            if "nbr_af_type" not in self.stored_values:
                self.stored_values["nbr_af_type"] = kwargs["nbr_af_type"]
                self.stored_values["nbr_af_family"] = kwargs["nbr_af_family"]
                return False
            else:
                if self.stored_values["nbr_af_type"] != kwargs["nbr_af_type"] or \
                        self.stored_values["nbr_af_family"] != kwargs["nbr_af_family"]:
                    self.stored_values["nbr_af_type"] = kwargs["nbr_af_type"]
                    self.stored_values["nbr_af_family"] = kwargs["nbr_af_family"]
                    return True
        if mode == "mgmt-router-bgp-template-view":
            if "group_name" not in self.stored_values:
                self.stored_values["group_name"] = kwargs["group_name"]
                return False
            else:
                if self.stored_values["group_name"] != kwargs["group_name"]:
                    self.stored_values["group_name"] = kwargs["group_name"]
                    return True
        if mode == "mgmt-router-bgp-template-af-view":
            if "tpl_af_type" not in self.stored_values:
                self.stored_values["tpl_af_type"] = kwargs["tpl_af_type"]
                self.stored_values["tpl_af_family"] = kwargs["tpl_af_family"]
                return False
            else:
                if self.stored_values["tpl_af_type"] != kwargs["tpl_af_type"] or \
                        self.stored_values["tpl_af_family"] != kwargs["tpl_af_family"]:
                    self.stored_values["tpl_af_type"] = kwargs["tpl_af_type"]
                    self.stored_values["tpl_af_family"] = kwargs["tpl_af_family"]
                    return True
        if mode == "mgmt-router-bgp-l2vpn-vni-view":
            if "vxlan_id" not in self.stored_values:
                self.stored_values["vxlan_id"] = kwargs["vxlan_id"]
                return False
            else:
                if self.stored_values["vxlan_id"] != kwargs["vxlan_id"]:
                    self.stored_values["vxlan_id"] = kwargs["vxlan_id"]
                    return True
        # For interface-style modes the instance suffix embedded in the
        # current prompt (e.g. "(conf-if-Ethernet0)") tells us whether we
        # are already in the requested sub-context.
        if mode == "mgmt-intf-config":
            prompt2 = prompt.replace("\\", "")
            intfNum = "-{})".format(kwargs["interface"])
            if intfNum in prompt2:
                return False
            else:
                return True
        if mode == "mgmt-vlan-config":
            prompt2 = prompt.replace("\\", "")
            intfNum = "-Vlan{})".format(kwargs["vlan"])
            if intfNum in prompt2:
                return False
            else:
                return True
        if mode == "mgmt-lag-config":
            prompt2 = prompt.replace("\\", "")
            intfNum = "-po{})".format(kwargs["portchannel"])
            if intfNum in prompt2:
                return False
            else:
                return True
        if mode == "mgmt-management-config":
            prompt2 = prompt.replace("\\", "")
            intfNum = "-eth{})".format(kwargs["management"])
            if intfNum in prompt2:
                return False
            else:
                return True
        if mode == "mgmt-vxlan-view":
            prompt2 = prompt.replace("\\", "")
            intfNum = "-Vxlan-{})".format(kwargs["vxlan"])
            if intfNum in prompt2:
                return False
            else:
                return True
        if mode == "mgmt-mirror-session-config":
            prompt2 = prompt.replace("\\", "")
            intfNum = "-mirror-{})".format(kwargs["session_name"])
            if intfNum in prompt2:
                return False
            else:
                return True
        if mode == "mgmt-mclag-view":
            prompt2 = prompt.replace("\\", "")
            intfNum = "mclag-domain-{})".format(kwargs["domain_id"])
            if intfNum in prompt2:
                return False
            else:
                return True
        if mode == "mgmt-lo-view":
            prompt2 = prompt.replace("\\", "")
            intfNum = "-lo{})".format(kwargs["loopback_id"])
            if intfNum in prompt2:
                return False
            else:
                return True
        return False
    def check_move_for_parent_of_tomode(self, prompt, mode, **kwargs):
        """Return True when entering *mode* requires first moving to a
        parent mode (i.e. the cached context values differ from the ones
        requested now). Mirrors ``check_move_for_parent_of_frommode`` but
        for the destination mode; the vtysh router cases additionally walk
        the mode hierarchy of the current prompt.
        """
        check_for_parents = False
        if mode == "vtysh-router-config":
            if "router" not in self.stored_values:
                self.stored_values["router"] = kwargs["router"]
                return False
            else:
                if self.stored_values["router"] != kwargs["router"]:
                    self.stored_values["router"] = kwargs["router"]
                    check_for_parents = True
        if mode == "vtysh-router-af-config":
            if "router" in kwargs:
                if "router" not in self.stored_values:
                    self.stored_values["router"] = kwargs["router"]
                    return False
                else:
                    if self.stored_values["router"] != kwargs["router"]:
                        self.stored_values["router"] = kwargs["router"]
                        check_for_parents = True
        if mode == "mgmt-ipv4-acl-config":
            if "aclname" not in self.stored_values:
                self.stored_values["aclname"] = kwargs["aclname"]
                return False
            else:
                # NOTE(review): unlike the frommode counterpart (and the
                # branches below), a changed aclname does not return True
                # here — execution falls through to the remaining checks.
                # Looks intentional/accepted; confirm before changing.
                if self.stored_values["aclname"] != kwargs["aclname"]:
                    self.stored_values["aclname"] = kwargs["aclname"]
        if mode == "mgmt-evpn-view":
            if "evpnname" not in self.stored_values:
                self.stored_values["evpnname"] = kwargs["evpnname"]
                return False
            else:
                if self.stored_values["evpnname"] != kwargs["evpnname"]:
                    self.stored_values["evpnname"] = kwargs["evpnname"]
                    return True
        if mode == "mgmt-bfd-peer-view":
            if "peer_ip" not in self.stored_values:
                self.stored_values["peer_ip"] = kwargs["peer_ip"]
                return False
            else:
                if self.stored_values["peer_ip"] != kwargs["peer_ip"]:
                    self.stored_values["peer_ip"] = kwargs["peer_ip"]
                    return True
        if mode == "mgmt-route-map-view":
            if "map_name" not in self.stored_values:
                self.stored_values["map_name"] = kwargs["map_name"]
                self.stored_values["action"] = kwargs["action"]
                self.stored_values["seq_num"] = kwargs["seq_num"]
                return False
            else:
                if self.stored_values["map_name"] != kwargs["map_name"] or \
                        self.stored_values["action"] != kwargs["action"] or \
                        self.stored_values["seq_num"] != kwargs["seq_num"]:
                    self.stored_values["map_name"] = kwargs["map_name"]
                    self.stored_values["action"] = kwargs["action"]
                    self.stored_values["seq_num"] = kwargs["seq_num"]
                    return True
        if mode == "mgmt-link-state-track-view":
            if "track_name" not in self.stored_values:
                self.stored_values["track_name"] = kwargs["track_name"]
                return False
            else:
                if self.stored_values["track_name"] != kwargs["track_name"]:
                    self.stored_values["track_name"] = kwargs["track_name"]
                    return True
        if mode == "mgmt-router-bgp-view":
            if "bgp_instance" not in self.stored_values:
                self.stored_values["bgp_instance"] = kwargs["bgp_instance"]
                self.stored_values["bgp_vrf_name"] = kwargs["bgp_vrf_name"]
                return False
            else:
                if self.stored_values["bgp_instance"] != kwargs["bgp_instance"] or \
                        self.stored_values["bgp_vrf_name"] != kwargs["bgp_vrf_name"]:
                    self.stored_values["bgp_instance"] = kwargs["bgp_instance"]
                    self.stored_values["bgp_vrf_name"] = kwargs["bgp_vrf_name"]
                    return True
        if mode == "mgmt-router-bgp-af-view":
            if "af_type" not in self.stored_values:
                self.stored_values["af_type"] = kwargs["af_type"]
                self.stored_values["af_family"] = kwargs["af_family"]
                return False
            else:
                if self.stored_values["af_type"] != kwargs["af_type"] or \
                        self.stored_values["af_family"] != kwargs["af_family"]:
                    self.stored_values["af_type"] = kwargs["af_type"]
                    self.stored_values["af_family"] = kwargs["af_family"]
                    return True
        if mode == "mgmt-router-bgp-nbr-view":
            if "ip_address" not in self.stored_values:
                self.stored_values["ip_address"] = kwargs["ip_address"]
                return False
            else:
                if self.stored_values["ip_address"] != kwargs["ip_address"]:
                    self.stored_values["ip_address"] = kwargs["ip_address"]
                    return True
        if mode == "mgmt-router-bgp-nbr-af-view":
            if "nbr_af_type" not in self.stored_values:
                self.stored_values["nbr_af_type"] = kwargs["nbr_af_type"]
                self.stored_values["nbr_af_family"] = kwargs["nbr_af_family"]
                return False
            else:
                if self.stored_values["nbr_af_type"] != kwargs["nbr_af_type"] or \
                        self.stored_values["nbr_af_family"] != kwargs["nbr_af_family"]:
                    self.stored_values["nbr_af_type"] = kwargs["nbr_af_type"]
                    self.stored_values["nbr_af_family"] = kwargs["nbr_af_family"]
                    return True
        if mode == "mgmt-router-bgp-template-view":
            if "group_name" not in self.stored_values:
                self.stored_values["group_name"] = kwargs["group_name"]
                return False
            else:
                if self.stored_values["group_name"] != kwargs["group_name"]:
                    self.stored_values["group_name"] = kwargs["group_name"]
                    return True
        if mode == "mgmt-router-bgp-template-af-view":
            if "tpl_af_type" not in self.stored_values:
                self.stored_values["tpl_af_type"] = kwargs["tpl_af_type"]
                self.stored_values["tpl_af_family"] = kwargs["tpl_af_family"]
                return False
            else:
                if self.stored_values["tpl_af_type"] != kwargs["tpl_af_type"] or \
                        self.stored_values["tpl_af_family"] != kwargs["tpl_af_family"]:
                    self.stored_values["tpl_af_type"] = kwargs["tpl_af_type"]
                    self.stored_values["tpl_af_family"] = kwargs["tpl_af_family"]
                    return True
        if mode == "mgmt-router-bgp-l2vpn-vni-view":
            if "vxlan_id" not in self.stored_values:
                self.stored_values["vxlan_id"] = kwargs["vxlan_id"]
                return False
            else:
                if self.stored_values["vxlan_id"] != kwargs["vxlan_id"]:
                    self.stored_values["vxlan_id"] = kwargs["vxlan_id"]
                    return True
        if check_for_parents:
            # Walk up the parent chain (modes[x][0] is the parent mode,
            # "" terminates the chain) starting from the current prompt's
            # mode; a parent hop is needed if *mode* is an ancestor.
            parent_modes_list = []
            curr_mode = self.get_mode_for_prompt(prompt)
            while True:
                parent_modes_list.append(self.modes[curr_mode][0])
                curr_mode = self.modes[curr_mode][0]
                if curr_mode == "":
                    break
            if mode in parent_modes_list:
                return True
        return False
    def get_backward_command_and_prompt(self, mode):
        """Return ``[exit_command, parent_prompt_pattern]`` for leaving
        *mode*; ``["", ""]`` when the mode is unknown."""
        if mode not in self.modes:
            return ["", ""]
        # modes[mode] layout observed here: [parent, enter_cmd, exit_cmd].
        cmd = self.modes[mode][2]
        expected_prompt = self.get_prompt_for_mode(self.modes[mode][0])
        return [cmd, expected_prompt]
    def get_forward_command_and_prompt_with_values(self, mode, **kwargs):
        """Return ``[enter_command, prompt_pattern]`` for entering *mode*,
        with the command's ``{}`` placeholders filled from the mode's
        required args (missing args become empty strings)."""
        if mode not in self.modes:
            return ["", ""]
        cmd = self.modes[mode][1]
        expected_prompt = self.get_prompt_for_mode(mode)
        if mode in self.required_args:
            values = []
            for arg in self.required_args[mode]:
                if arg in kwargs.keys():
                    if mode == "mgmt-intf-config" and arg == "interface":
                        # CLI wants "Ethernet 0" while callers pass "Ethernet0".
                        intf_value = re.sub("Ethernet", "Ethernet ", kwargs[arg])
                        values.append(intf_value)
                    else:
                        values.append(kwargs[arg])
                else:
                    values.append("")
            cmd = cmd.format(*values)
        return [cmd, expected_prompt]
| 43.960084
| 142
| 0.533333
| 2,310
| 20,925
| 4.589177
| 0.074892
| 0.13678
| 0.218847
| 0.046788
| 0.793321
| 0.747382
| 0.738515
| 0.714744
| 0.714744
| 0.705594
| 0
| 0.00235
| 0.349247
| 20,925
| 475
| 143
| 44.052632
| 0.776162
| 0.010131
| 0
| 0.774272
| 0
| 0
| 0.176502
| 0.022946
| 0
| 0
| 0
| 0.002105
| 0
| 1
| 0.024272
| false
| 0.002427
| 0.012136
| 0
| 0.23301
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
00b050c0564ef1f8174af49b425b79f50122314d
| 17,462
|
py
|
Python
|
tests/test_modis.py
|
up42/modis
|
05df85d606abe60cf101efa522a31a6bd6479bda
|
[
"MIT"
] | 4
|
2020-03-17T22:10:29.000Z
|
2021-08-05T11:34:45.000Z
|
tests/test_modis.py
|
up42/modis
|
05df85d606abe60cf101efa522a31a6bd6479bda
|
[
"MIT"
] | 11
|
2019-08-16T08:30:42.000Z
|
2022-03-12T00:11:12.000Z
|
tests/test_modis.py
|
up42/modis
|
05df85d606abe60cf101efa522a31a6bd6479bda
|
[
"MIT"
] | 1
|
2021-05-06T12:24:42.000Z
|
2021-05-06T12:24:42.000Z
|
"""
Integration tests for the higher-level fetch methods
"""
# pylint: disable=unused-import, redefined-outer-name
# requests_mock used as fixture in tests
import os
import re
import rasterio as rio
import numpy as np
import pytest
from rio_cogeo.cogeo import cog_validate
from context import STACQuery, Modis
from blockutils.exceptions import UP42Error
@pytest.fixture()
def modis_instance():
    """Provide a Modis block instance at the zoom level shared by all tests."""
    instance = Modis(default_zoom_level=9)
    return instance
@pytest.mark.live
def test_aoiclipped_fetcher_fetch_in_dry_run_mode(modis_instance):
    """Dry-run fetch returns metadata only: no data path, but a quicklook."""
    params = {
        "zoom_level": 9,
        "time": "2018-11-01T16:40:49+00:00/2018-11-20T16:41:49+00:00",
        "limit": 1,
        "bbox": [
            123.59349578619005,
            -10.188159969024264,
            123.70257586240771,
            -10.113232998848046,
        ],
        "imagery_layers": ["MODIS_Terra_CorrectedReflectance_TrueColor"],
    }
    res = modis_instance.fetch(STACQuery.from_dict(params), dry_run=True)
    assert len(res.features) == 1
    feature = res.features[0]
    assert "up42.data_path" not in feature["properties"]
    assert os.path.isfile(f"/tmp/quicklooks/{feature['id']}.jpg")
@pytest.mark.live
def test_aoiclipped_fetcher_multiple_fetch_in_dry_run_mode(modis_instance):
    """Dry-run fetch with two imagery layers returns metadata only."""
    params = {
        "zoom_level": 9,
        "time": "2018-11-01T16:40:49+00:00/2018-11-20T16:41:49+00:00",
        "limit": 1,
        "bbox": [
            123.59349578619005,
            -10.188159969024264,
            123.70257586240771,
            -10.113232998848046,
        ],
        "imagery_layers": [
            "MODIS_Terra_CorrectedReflectance_TrueColor",
            "MODIS_Aqua_CorrectedReflectance_TrueColor",
        ],
    }
    res = modis_instance.fetch(STACQuery.from_dict(params), dry_run=True)
    assert len(res.features) == 1
    feature = res.features[0]
    assert "up42.data_path" not in feature["properties"]
    assert os.path.isfile(f"/tmp/quicklooks/{feature['id']}.jpg")
@pytest.mark.live
def test_aoiclipped_fetcher_layer_error_fetch_in_dry_run_mode(modis_instance):
    """
    Test for dry-run mode i.e. only metadata is returned, error in name of layer.

    The bogus layer name must surface in the raised UP42Error message.
    """
    query = STACQuery.from_dict(
        {
            "zoom_level": 9,
            "time": "2018-11-01T16:40:49+00:00/2018-11-20T16:41:49+00:00",
            "limit": 1,
            "bbox": [
                123.59349578619005,
                -10.188159969024264,
                123.70257586240771,
                -10.113232998848046,
            ],
            "imagery_layers": [
                "MODIS_Terra_CorrectedReflectance_TrueColor",
                "AN_ERROR_FOR_SURE",
            ],
        }
    )
    # Bug fix: the original pattern r".*['AN_ERROR_FOR_SURE'].*" left the
    # brackets unescaped, so regex treated them as a character class and the
    # match asserted almost nothing. Escape them so the literal bracketed
    # layer name must appear in the error message.
    with pytest.raises(UP42Error, match=r"\['AN_ERROR_FOR_SURE'\]"):
        modis_instance.fetch(query, dry_run=True)
@pytest.mark.live
def test_aoiclipped_fetcher_geom_error_fetch_in_dry_run_mode(modis_instance):
    """Dry-run fetch with an out-of-range bbox must raise UP42Error."""
    params = {
        "zoom_level": 9,
        "time": "2018-11-01T16:40:49+00:00/2018-11-20T16:41:49+00:00",
        "limit": 1,
        "bbox": [200, 200, 210, 210],
        "imagery_layers": ["MODIS_Terra_CorrectedReflectance_TrueColor"],
    }
    query = STACQuery.from_dict(params)
    with pytest.raises(UP42Error):
        modis_instance.fetch(query, dry_run=True)
def test_aoiclipped_dry_run_only_bbox(requests_mock, modis_instance):
    """
    Mocked test for fetching data with only bbox param.

    All HTTP endpoints (capabilities, WMS, WMTS) are mocked with local
    fixture files so no network access is needed.
    """
    _location_ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
    with open(
        os.path.join(_location_, "mock_data/available_imagery_layers.xml"), "rb"
    ) as xml_file:
        mock_xml: object = xml_file.read()
    with open(os.path.join(_location_, "mock_data/tile.jpg"), "rb") as tile_file:
        mock_image: object = tile_file.read()
    # Fix: the original compiled matcher_get_capabilities twice; the
    # redundant second assignment was removed.
    matcher_get_capabilities = re.compile("WMTSCapabilities.xml")
    matcher_wms = re.compile(
        "https://gibs.earthdata.nasa.gov/wms/epsg4326/best/wms.cgi?"
    )
    matcher_wmts = re.compile(
        "https://gibs.earthdata.nasa.gov/wmts/epsg3857/"
        "best/MODIS_Terra_CorrectedReflectance_TrueColor/"
    )
    requests_mock.get(matcher_get_capabilities, content=mock_xml)
    requests_mock.get(matcher_wms, content=mock_image)
    requests_mock.get(matcher_wmts, content=mock_image)
    query = STACQuery.from_dict({"bbox": [76.231358, 9.909276, 76.300637, 9.971047]})
    res = modis_instance.fetch(query, dry_run=True)
    assert len(res.features) == 1
def test_aoiclipped_fetcher_fetch(requests_mock, modis_instance):
    """Mocked full fetch: validates the COG output, band sums, tags and
    quicklook without hitting the network (faster than the live variant)."""
    _location_ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
    with open(os.path.join(_location_, "mock_data/tile.jpg"), "rb") as tile_file:
        mock_image: object = tile_file.read()
    with open(
        os.path.join(_location_, "mock_data/available_imagery_layers.xml"), "rb"
    ) as xml_file:
        mock_xml: object = xml_file.read()
    # Mock the three endpoints the block talks to.
    matcher_wms = re.compile(
        "https://gibs.earthdata.nasa.gov/wms/epsg4326/best/wms.cgi?"
    )
    matcher_wmts = re.compile(
        "https://gibs.earthdata.nasa.gov/wmts/epsg3857/"
        "best/MODIS_Terra_CorrectedReflectance_TrueColor/"
    )
    matcher_get_capabilities = re.compile("WMTSCapabilities.xml")
    requests_mock.get(matcher_get_capabilities, content=mock_xml)
    requests_mock.get(matcher_wms, content=mock_image)
    requests_mock.get(matcher_wmts, content=mock_image)
    params = {
        "zoom_level": 9,
        "time": "2018-11-01T16:40:49+00:00/2018-11-20T16:41:49+00:00",
        "limit": 1,
        "bbox": [
            123.59349578619005,
            -10.188159969024264,
            123.70257586240771,
            -10.113232998848046,
        ],
        "imagery_layers": ["MODIS_Terra_CorrectedReflectance_TrueColor"],
    }
    res = modis_instance.fetch(STACQuery.from_dict(params), dry_run=False)
    assert len(res.features) == 1
    feature = res.features[0]
    img_filename = f"/tmp/output/{feature['properties']['up42.data_path']}"
    assert cog_validate(img_filename)[0]
    with rio.open(img_filename) as dataset:
        band2 = dataset.read(2)
        assert np.sum(band2) == 7954025
        assert dataset.tags(1)["layer"] == "MODIS_Terra_CorrectedReflectance_TrueColor"
        assert dataset.tags(1)["band"] == str(1)
        assert dataset.tags(2)["band"] == str(2)
    assert os.path.isfile(f"/tmp/quicklooks/{feature['id']}.jpg")
def test_aoiclipped_dry_run_error_name_fetcher_fetch(requests_mock, modis_instance):
    """
    Mocked test for fetching data with error in name.

    The unknown layer name must appear in the raised UP42Error message.
    """
    _location_ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
    with open(
        os.path.join(_location_, "mock_data/available_imagery_layers.xml"), "rb"
    ) as xml_file:
        mock_xml: object = xml_file.read()
    matcher_get_capabilities = re.compile("WMTSCapabilities.xml")
    requests_mock.get(matcher_get_capabilities, content=mock_xml)
    query = STACQuery.from_dict(
        {
            "zoom_level": 9,
            "time": "2018-11-01T16:40:49+00:00/2018-11-20T16:41:49+00:00",
            "limit": 1,
            "bbox": [
                123.59349578619005,
                -10.188159969024264,
                123.70257586240771,
                -10.113232998848046,
            ],
            "imagery_layers": ["AN_ERROR_FOR_SURE"],
        }
    )
    # Bug fix: unescaped [...] in the original match pattern acted as a
    # regex character class, matching nearly any message. Escaped so the
    # literal bracketed layer name is actually asserted.
    with pytest.raises(UP42Error, match=r"\['AN_ERROR_FOR_SURE'\]"):
        modis_instance.fetch(query, dry_run=True)
def test_aoiclipped_dry_run_multiple_error_name_fetcher_fetch(
    requests_mock, modis_instance
):
    """
    Mocked test for fetching data with error in name.

    Both invalid layer names must be reported in the raised UP42Error.
    """
    _location_ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
    with open(
        os.path.join(_location_, "mock_data/available_imagery_layers.xml"), "rb"
    ) as xml_file:
        mock_xml: object = xml_file.read()
    matcher_get_capabilities = re.compile("WMTSCapabilities.xml")
    requests_mock.get(matcher_get_capabilities, content=mock_xml)
    query = STACQuery.from_dict(
        {
            "zoom_level": 9,
            "time": "2018-11-01T16:40:49+00:00/2018-11-20T16:41:49+00:00",
            "limit": 1,
            "bbox": [
                123.59349578619005,
                -10.188159969024264,
                123.70257586240771,
                -10.113232998848046,
            ],
            "imagery_layers": [
                "MODIS_Terra_CorrectedReflectance_TrueColor",
                "MODIS_Aqua_CorrectedReflectance_TrueColor",
                "12345",
                "AN_ERROR_FOR_SURE",
            ],
        }
    )
    # Bug fix: the original r".*['12345','AN_ERROR_FOR_SURE'].*" was an
    # unescaped character class asserting almost nothing. Require both bad
    # names to appear (in input order); exact list formatting in the message
    # is not assumed — TODO confirm against UP42Error's actual text.
    with pytest.raises(UP42Error, match=r"12345.*AN_ERROR_FOR_SURE"):
        modis_instance.fetch(query, dry_run=True)
def test_aoiclipped_dry_run_error_geom_fetcher_fetch(requests_mock, modis_instance):
    """Mocked dry-run fetch with an invalid bbox must raise UP42Error."""
    _location_ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
    with open(
        os.path.join(_location_, "mock_data/available_imagery_layers.xml"), "rb"
    ) as xml_file:
        mock_xml: object = xml_file.read()
    capabilities_matcher = re.compile("WMTSCapabilities.xml")
    requests_mock.get(capabilities_matcher, content=mock_xml)
    params = {
        "zoom_level": 9,
        "time": "2018-11-01T16:40:49+00:00/2018-11-20T16:41:49+00:00",
        "limit": 1,
        "bbox": [179, 89, 180, 90],
        "imagery_layers": ["MODIS_Terra_CorrectedReflectance_TrueColor"],
    }
    query = STACQuery.from_dict(params)
    with pytest.raises(UP42Error):
        modis_instance.fetch(query, dry_run=True)
@pytest.mark.live
def test_aoiclipped_fetcher_fetch_live(modis_instance):
    """Unmocked ("live") full fetch: checks feature count, band sum and
    quicklook presence."""
    params = {
        "zoom_level": 9,
        "time": "2019-01-01T16:40:49+00:00/2019-01-25T16:41:49+00:00",
        "limit": 2,
        "bbox": [
            38.941807150840766,
            21.288749561718983,
            39.686130881309516,
            21.808610762909364,
        ],
        "imagery_layers": ["MODIS_Terra_CorrectedReflectance_TrueColor"],
    }
    res = modis_instance.fetch(STACQuery.from_dict(params), dry_run=False)
    assert len(res.features) == 2
    feature = res.features[0]
    img_filename = f"/tmp/output/{feature['properties']['up42.data_path']}"
    with rio.open(img_filename) as dataset:
        band2 = dataset.read(2)
        assert np.sum(band2) == 28351388
    assert os.path.isfile(f"/tmp/quicklooks/{feature['id']}.jpg")
@pytest.mark.live
def test_aoiclipped_fetcher_virs_fetch_live(modis_instance):
    """Unmocked ("live") fetch of a single-band VIIRS layer: checks the
    band sum, band count, quicklook and COG validity."""
    params = {
        "zoom_level": 9,
        "time": "2019-01-01T16:40:49+00:00/2019-01-25T16:41:49+00:00",
        "limit": 2,
        "bbox": [
            38.941807150840766,
            21.288749561718983,
            39.686130881309516,
            21.808610762909364,
        ],
        "imagery_layers": ["VIIRS_SNPP_Brightness_Temp_BandI5_Night"],
    }
    res = modis_instance.fetch(STACQuery.from_dict(params), dry_run=False)
    assert len(res.features) == 2
    feature = res.features[0]
    img_filename = f"/tmp/output/{feature['properties']['up42.data_path']}"
    with rio.open(img_filename) as dataset:
        band1 = dataset.read(1)
        assert np.sum(band1) == 45232508
        assert dataset.count == 1
    assert os.path.isfile(f"/tmp/quicklooks/{feature['id']}.jpg")
    assert cog_validate(img_filename)[0]
@pytest.mark.live
def test_aoiclipped_fetcher_rio_tags_fetch_live(modis_instance):
    """Unmocked ("live") fetch of MODIS + VIIRS together: per-band layer
    and band tags must identify which source each band came from."""
    params = {
        "zoom_level": 9,
        "time": "2019-01-01T16:40:49+00:00/2019-01-25T16:41:49+00:00",
        "limit": 2,
        "bbox": [
            38.941807150840766,
            21.288749561718983,
            39.686130881309516,
            21.808610762909364,
        ],
        "imagery_layers": [
            "MODIS_Terra_CorrectedReflectance_TrueColor",
            "VIIRS_SNPP_Brightness_Temp_BandI5_Night",
        ],
    }
    res = modis_instance.fetch(STACQuery.from_dict(params), dry_run=False)
    assert len(res.features) == 2
    feature = res.features[0]
    img_filename = f"/tmp/output/{feature['properties']['up42.data_path']}"
    with rio.open(img_filename) as dataset:
        assert dataset.count == 4
        band1 = dataset.read(1)
        assert np.sum(band1) == 29570538
        assert dataset.tags(1)["layer"] == "MODIS_Terra_CorrectedReflectance_TrueColor"
        assert dataset.tags(1)["band"] == str(1)
        assert dataset.tags(4)["layer"] == "VIIRS_SNPP_Brightness_Temp_BandI5_Night"
        assert dataset.tags(4)["band"] == str(1)
    assert os.path.isfile(f"/tmp/quicklooks/{feature['id']}.jpg")
@pytest.mark.live
def test_aoiclipped_fetcher_multiple_fetch_live(modis_instance):
    """Unmocked ("live") fetch with two MODIS layers: six output bands,
    known band sum, quicklook and COG validity."""
    params = {
        "zoom_level": 9,
        "time": "2019-01-01T16:40:49+00:00/2019-01-25T16:41:49+00:00",
        "limit": 2,
        "bbox": [
            38.941807150840766,
            21.288749561718983,
            39.686130881309516,
            21.808610762909364,
        ],
        "imagery_layers": [
            "MODIS_Terra_CorrectedReflectance_TrueColor",
            "MODIS_Aqua_CorrectedReflectance_TrueColor",
        ],
    }
    res = modis_instance.fetch(STACQuery.from_dict(params), dry_run=False)
    assert len(res.features) == 2
    feature = res.features[0]
    img_filename = f"/tmp/output/{feature['properties']['up42.data_path']}"
    with rio.open(img_filename) as dataset:
        band2 = dataset.read(2)
        assert np.sum(band2) == 28351388
        assert dataset.count == 6
    assert os.path.isfile(f"/tmp/quicklooks/{feature['id']}.jpg")
    assert cog_validate(img_filename)[0]
@pytest.mark.live
def test_aoiclipped_fetcher_layer_error_fetch_live(modis_instance):
    """
    Unmocked ("live") test for fetching data, error in name of layer.

    The bogus layer name must surface in the raised UP42Error message.
    """
    query = STACQuery.from_dict(
        {
            "zoom_level": 9,
            "time": "2019-01-01T16:40:49+00:00/2019-01-25T16:41:49+00:00",
            "limit": 2,
            "bbox": [
                38.941807150840766,
                21.288749561718983,
                39.686130881309516,
                21.808610762909364,
            ],
            "imagery_layers": [
                "MODIS_Terra_CorrectedReflectance_TrueColor",
                "AN_ERROR_FOR_SURE",
            ],
        }
    )
    # Bug fix: the original match pattern's unescaped [...] was a regex
    # character class, so it matched nearly any message; brackets are now
    # escaped to assert the literal bracketed layer name.
    with pytest.raises(UP42Error, match=r"\['AN_ERROR_FOR_SURE'\]"):
        modis_instance.fetch(query, dry_run=False)
@pytest.mark.live
def test_aoiclipped_fetcher_geom_error_fetch_live(modis_instance):
    """
    Unmocked ("live") test for fetching data, error in geometry of layer
    """
    out_of_range_query = STACQuery.from_dict(
        {
            "zoom_level": 9,
            "time": "2019-01-01T16:40:49+00:00/2019-01-25T16:41:49+00:00",
            "limit": 2,
            "bbox": [200, 200, 210, 210],
            "imagery_layers": ["MODIS_Terra_CorrectedReflectance_TrueColor"],
        }
    )
    # The bbox lies outside valid lon/lat bounds, so fetch must raise.
    with pytest.raises(UP42Error):
        modis_instance.fetch(out_of_range_query, dry_run=False)
@pytest.mark.live
def test_aoiclipped_fetcher_layers_cog(modis_instance):
    """
    Unmocked ("live") test for fetching data. Tests cog conversion with image, with 7 bands.
    """
    search_query = STACQuery.from_dict(
        {
            "zoom_level": 9,
            "time": "2019-01-01T16:40:49+00:00/2021-02-15T23:59:59+00:00",
            "limit": 1,
            "bbox": [
                38.941807150840766,
                21.288749561718983,
                39.686130881309516,
                21.808610762909364,
            ],
            "imagery_layers": [
                "MODIS_Terra_CorrectedReflectance_TrueColor",
                "MODIS_Terra_EVI_8Day",
                "MODIS_Terra_CorrectedReflectance_Bands721",
            ],
        }
    )
    fetch_result = modis_instance.fetch(search_query, dry_run=False)
    assert len(fetch_result.features) == 1
    output_path = f"/tmp/output/{fetch_result.features[0]['properties']['up42.data_path']}"
    # Three layers (3 + 1 + 3 bands) -> 7 bands, written as a valid COG.
    with rio.open(output_path) as dataset:
        second_band = dataset.read(2)
        assert np.sum(second_band) == 28202042
        assert dataset.count == 7
    assert cog_validate(output_path)[0]
| 31.576854
| 96
| 0.60812
| 2,059
| 17,462
| 4.929577
| 0.105877
| 0.042266
| 0.017143
| 0.06532
| 0.919803
| 0.916158
| 0.904335
| 0.904335
| 0.889754
| 0.871034
| 0
| 0.126785
| 0.266006
| 17,462
| 552
| 97
| 31.634058
| 0.665132
| 0.067232
| 0
| 0.746231
| 0
| 0.037688
| 0.242283
| 0.173121
| 0
| 0
| 0
| 0
| 0.09799
| 1
| 0.042714
| false
| 0
| 0.020101
| 0.002513
| 0.065327
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
00bacecc8f0ce2248b41664630905d4d73f1d594
| 120
|
py
|
Python
|
app/pages/__init__.py
|
Anioko/CMS
|
b6465faf2a5d7333f494526bcddf8083d6807aee
|
[
"MIT"
] | null | null | null |
app/pages/__init__.py
|
Anioko/CMS
|
b6465faf2a5d7333f494526bcddf8083d6807aee
|
[
"MIT"
] | 1
|
2021-06-02T01:40:15.000Z
|
2021-06-02T01:40:15.000Z
|
app/pages/__init__.py
|
Anioko/CMS
|
b6465faf2a5d7333f494526bcddf8083d6807aee
|
[
"MIT"
] | null | null | null |
from app.pages import errors # noqa
from app.pages.views import pages # noqa
#from app.pages.forms import pages # noqa
| 30
| 41
| 0.766667
| 20
| 120
| 4.6
| 0.4
| 0.228261
| 0.391304
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158333
| 120
| 3
| 42
| 40
| 0.910891
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
da9a60623eabb0e818682d2da1022535abbd4734
| 87,402
|
py
|
Python
|
Define_Model/ResNet.py
|
Wenhao-Yang/DeepSpeaker-pytorch
|
99eb8de3357c85e2b7576da2a742be2ffd773ead
|
[
"MIT"
] | 8
|
2020-08-26T13:32:56.000Z
|
2022-01-18T21:05:46.000Z
|
Define_Model/ResNet.py
|
Wenhao-Yang/DeepSpeaker-pytorch
|
99eb8de3357c85e2b7576da2a742be2ffd773ead
|
[
"MIT"
] | 1
|
2020-07-24T17:06:16.000Z
|
2020-07-24T17:06:16.000Z
|
Define_Model/ResNet.py
|
Wenhao-Yang/DeepSpeaker-pytorch
|
99eb8de3357c85e2b7576da2a742be2ffd773ead
|
[
"MIT"
] | 5
|
2020-12-11T03:31:15.000Z
|
2021-11-23T15:57:55.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: 874681044@qq.com
@Software: PyCharm
@File: ResNet.py
@Time: 2019/10/10 下午5:09
@Overview: Deep Speaker using Resnet with CNN, which is not ordinary Resnet.
This file define resnet in 'Deep Residual Learning for Image Recognition'
For all model, the pre_forward function is for extract vectors and forward for classification.
"""
import torch
import torch.nn.functional as F
from torch import nn
from torchvision.models.resnet import BasicBlock
from torchvision.models.resnet import Bottleneck
from torchvision.models.densenet import _DenseBlock
from torchvision.models.shufflenetv2 import InvertedResidual
from Define_Model.FilterLayer import TimeMaskLayer, FreqMaskLayer, SqueezeExcitation, GAIN, fBLayer, fBPLayer, fLLayer
from Define_Model.FilterLayer import fDLR, GRL, L2_Norm, Mean_Norm, Inst_Norm, MeanStd_Norm, CBAM
from Define_Model.Pooling import SelfAttentionPooling, AttentionStatisticPooling, StatisticPooling, AdaptiveStdPool2d, \
SelfVadPooling, GhostVLAD_v2
from Define_Model.FilterLayer import fDLR, GRL
from Define_Model.Pooling import SelfAttentionPooling, AttentionStatisticPooling, StatisticPooling, AdaptiveStdPool2d, \
SelfVadPooling
def conv1x1(in_planes, out_planes, stride=1):
    """Return a bias-free 1x1 convolution (pointwise channel projection)."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=1, stride=stride, bias=False)
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding=1 (bias-free).

    BUGFIX: the docstring previously said "1x1 convolution" — a copy-paste
    from conv1x1 above; the kernel is and always was 3x3.
    """
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, padding=1,
                     stride=stride, bias=False)
class SEBasicBlock(nn.Module):
    """ResNet BasicBlock with a Squeeze-and-Excitation layer applied to the
    residual branch before the identity add."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, reduction_ratio=4):
        super(SEBasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and downsample, when given) carries any stride != 1 downsampling
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
        self.reduction_ratio = reduction_ratio
        # Squeeze-and-Excitation channel re-weighting
        self.se_layer = SqueezeExcitation(inplanes=planes, reduction_ratio=reduction_ratio)

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = self.se_layer(y)
        y = y + shortcut
        return self.relu(y)
class CBAMBlock(nn.Module):
    """ResNet BasicBlock variant that applies CBAM attention to the residual
    branch before the identity add."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, reduction_ratio=16):
        super(CBAMBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and downsample, when given) carries any stride != 1 downsampling
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
        self.reduction_ratio = reduction_ratio
        # CBAM channel + spatial attention
        self.CBAM_layer = CBAM(planes, planes)

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = self.CBAM_layer(y)
        y = y + shortcut
        return self.relu(y)
class Res2Conv2dReluBn(nn.Module):
    """Res2Net-style multi-scale convolution.

    in_channels == out_channels == channels; the input is split into `scale`
    groups along the channel axis, each group (except the last when scale > 1)
    is passed through conv -> relu -> bn with a running sum over groups, and
    the pieces are re-concatenated.
    """

    def __init__(self, channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False, scale=4):
        super().__init__()
        assert channels % scale == 0, f"{channels} % {scale} != 0"
        self.scale = scale
        self.width = channels // scale
        # when scale == 1 there is exactly one branch; otherwise scale - 1
        self.nums = 1 if scale == 1 else scale - 1
        convs, bns = [], []
        for _ in range(self.nums):
            convs.append(nn.Conv2d(self.width, self.width, kernel_size, stride, padding, dilation, bias=bias))
            bns.append(nn.BatchNorm2d(self.width))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)

    def forward(self, x):
        chunks = torch.split(x, self.width, 1)
        pieces = []
        sp = None
        for i, (conv, bn) in enumerate(zip(self.convs, self.bns)):
            sp = chunks[i] if i == 0 else sp + chunks[i]
            # order: conv -> relu -> bn
            sp = bn(F.relu(conv(sp)))
            pieces.append(sp)
        if self.scale != 1:
            # the last chunk bypasses the conv branches entirely
            pieces.append(chunks[self.nums])
        return torch.cat(pieces, dim=1)
class Conv2dReluBn(nn.Module):
    """Conv2d followed by ReLU then BatchNorm (note the relu-before-bn order)."""

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        y = self.conv(x)
        y = F.relu(y)
        return self.bn(y)
class SE_Res2Block(nn.Module):
    """SE-Res2Block (as used in ECAPA-TDNN style models).

    conv1 (1x1) -> Res2 multi-scale conv -> conv3 (1x1) -> SE, then a residual
    add with the input. Note: the ECAPA paper places the residual connection in
    the surrounding model; here it is applied inside the block, so stride /
    padding must keep the spatial shape unchanged.
    """

    def __init__(self, inplanes, planes, kernel_size, padding, stride=1, dilation=1,
                 scale=8, reduction_ratio=2):
        super(SE_Res2Block, self).__init__()
        self.scale = scale
        self.stride = stride
        # BUGFIX: the original lines ended with trailing commas, which made
        # conv1/conv2/conv3 one-element *tuples* instead of Modules, so
        # forward() crashed on `self.conv1(x)` and the submodules were never
        # registered with the parent Module.
        self.conv1 = Conv2dReluBn(inplanes, planes, kernel_size=1, stride=1, padding=0)
        self.conv2 = Res2Conv2dReluBn(planes, kernel_size, stride, padding, dilation, scale=scale)
        self.conv3 = Conv2dReluBn(planes, planes, kernel_size=1, stride=1, padding=0)
        # BUGFIX: self.relu was referenced in forward() but never defined.
        self.relu = nn.ReLU(inplace=True)
        # Squeeze-and-Excitation channel re-weighting
        self.se_layer = SqueezeExcitation(inplanes=planes, reduction_ratio=reduction_ratio)

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)
        out = self.se_layer(out)
        out += identity
        out = self.relu(out)
        return out
class Block3x3(nn.Module):
    """Residual unit of three stacked 3x3 convolutions; the stride (if any)
    is applied at the middle convolution."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Block3x3, self).__init__()
        self.conv1 = conv3x3(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv3x3(planes, planes)
        self.bn3 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + shortcut
        return self.relu(y)
class InstBlock3x3(nn.Module):
    """Block3x3 variant that uses InstanceNorm2d instead of BatchNorm2d."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(InstBlock3x3, self).__init__()
        self.conv1 = conv3x3(inplanes, planes)
        self.bn1 = nn.InstanceNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.InstanceNorm2d(planes)
        self.conv3 = conv3x3(planes, planes)
        self.bn3 = nn.InstanceNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + shortcut
        return self.relu(y)
class VarSizeConv(nn.Module):
    """Parallel convolutions with different kernel sizes (default [3, 5, 9]).

    The input itself (average-pooled when stride != 1) is concatenated with
    the three branch outputs along the channel axis, yielding
    inplanes + 3 * planes output channels.
    """

    def __init__(self, inplanes, planes, stride=1, kernel_size=[3, 5, 9]):
        super(VarSizeConv, self).__init__()
        # NOTE: attribute name 'stide' (sic) kept for backward compatibility.
        self.stide = stride
        # paddings 1/2/4 keep each branch the same spatial size for the default kernels
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=kernel_size[0], stride=stride, padding=1)
        self.bn1 = nn.InstanceNorm2d(planes)
        self.conv2 = nn.Conv2d(inplanes, planes, kernel_size=kernel_size[1], stride=stride, padding=2)
        self.bn2 = nn.InstanceNorm2d(planes)
        self.conv3 = nn.Conv2d(inplanes, planes, kernel_size=kernel_size[2], stride=stride, padding=4)
        self.bn3 = nn.InstanceNorm2d(planes)
        self.avg = nn.AvgPool2d(kernel_size=int(stride * 2 + 1), stride=stride, padding=stride)

    def forward(self, x):
        branch1 = self.bn1(self.conv1(x))
        branch2 = self.bn2(self.conv2(x))
        branch3 = self.bn3(self.conv3(x))
        if self.stide != 1:
            # pool the identity path so its spatial size matches the strided branches
            x = self.avg(x)
        return torch.cat([x, branch1, branch2, branch3], dim=1)
class SimpleResNet(nn.Module):
    """ResNet-34-shaped (layers [3, 4, 6, 3]) embedding network over 1-channel input.

    forward(x) returns (logits, embeddings); the embedding is the alpha-scaled
    L2-normalised output of fc1.
    """

    def __init__(self, block=BasicBlock,
                 num_classes=1000,
                 embedding_size=128,
                 zero_init_residual=False,
                 groups=1,
                 width_per_group=64,
                 replace_stride_with_dilation=None,
                 norm_layer=None, **kwargs):
        super(SimpleResNet, self).__init__()
        layers = [3, 4, 6, 3]
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.embedding_size = embedding_size
        self.inplanes = 16
        self.dilation = 1
        num_filter = [16, 32, 64, 128]
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(1, num_filter[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = norm_layer(num_filter[0])
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.layer1 = self._make_layer(block, num_filter[0], layers[0])
        self.layer2 = self._make_layer(block, num_filter[1], layers[1], stride=2)
        self.layer3 = self._make_layer(block, num_filter[2], layers[2], stride=2)
        self.layer4 = self._make_layer(block, num_filter[3], layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc1 = nn.Linear(128 * block.expansion, embedding_size)
        # scaling factor applied to the normalised embedding before classification
        self.alpha = 12
        self.fc2 = nn.Linear(embedding_size, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # BUGFIX: use the in-place init API (normal_/constant_) — the
                # non-underscore variants are deprecated and removed in modern
                # torch; this also matches the init style used elsewhere in
                # this file.
                nn.init.normal_(m.weight, mean=0., std=1.)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def l2_norm(self, input):
        """L2-normalise `input` along dim 1 (1e-10 epsilon guards zero vectors)."""
        input_size = input.size()
        buffer = torch.pow(input, 2)
        normp = torch.sum(buffer, 1).add_(1e-10)
        norm = torch.sqrt(normp)
        _output = torch.div(input, norm.view(-1, 1).expand_as(input))
        output = _output.view(input_size)
        return output

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; downsample the identity when the shape changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        x = self.l2_norm(x)
        embeddings = x * self.alpha
        x = self.fc2(embeddings)
        return x, embeddings

    # Allow for accessing forward method in a inherited class
    forward = _forward
# Analysis of Length Normalization in End-to-End Speaker Verification System
# https://arxiv.org/abs/1806.03209
class ThinResNet(nn.Module):
    """Thin ResNet speaker-embedding network.

    NOTE(review): this block replaces an unresolved git merge conflict
    (`<<<<<<< HEAD` ... `>>>>>>> Server/Server`) that made the file
    unparseable. The resolution keeps the "Server/Server" side throughout —
    the newer variant with configurable channels/block types, the
    filter/input-norm/mask front-ends, and the L2_Norm module — and drops the
    older HEAD-only `ExporingResNet` variant of the same model.

    forward(x) returns (logits, embedding).
    """

    def __init__(self, resnet_size=34, block_type='None', expansion=1, channels=[16, 32, 64, 128],
                 input_len=300, inst_norm=True, input_dim=257, sr=16000, gain_axis='both',
                 kernel_size=5, stride=1, padding=2, dropout_p=0.0, exp=False, filter_fix=False,
                 feat_dim=64, num_classes=1000, embedding_size=128, fast='None', time_dim=1, avg_size=4,
                 alpha=12, encoder_type='STAP', zero_init_residual=False, groups=1, width_per_group=64,
                 filter=None, replace_stride_with_dilation=None, norm_layer=None,
                 mask='None', mask_len=10,
                 input_norm='', gain_layer=False, **kwargs):
        super(ThinResNet, self).__init__()
        resnet_type = {8: [1, 1, 1, 0],
                       10: [1, 1, 1, 1],
                       18: [2, 2, 2, 2],
                       34: [3, 4, 6, 3],
                       50: [3, 4, 6, 3],
                       101: [3, 4, 23, 3]}
        layers = resnet_type[resnet_size]
        freq_dim = avg_size  # default 1
        time_dim = time_dim  # default 4
        self.input_len = input_len
        self.input_dim = input_dim
        self.inst_norm = inst_norm
        self.filter = filter
        self._norm_layer = nn.BatchNorm2d
        self.embedding_size = embedding_size
        self.dropout_p = dropout_p
        self.gain_layer = gain_layer
        self.gain_axis = gain_axis
        self.mask = mask
        self.dilation = 1
        self.fast = str(fast)
        self.num_filter = channels  # e.g. [16, 32, 64, 128]
        self.inplanes = self.num_filter[0]
        if block_type == "seblock":
            block = SEBasicBlock
        elif block_type == 'cbam':
            block = CBAMBlock
        else:
            block = BasicBlock if resnet_size < 50 else Bottleneck
        block.expansion = expansion
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Learnable (or fixed) filter-bank front-end.
        if self.filter == 'fDLR':
            self.filter_layer = fDLR(input_dim=input_dim, sr=sr, num_filter=feat_dim, exp=exp, filter_fix=filter_fix)
        elif self.filter == 'fBLayer':
            self.filter_layer = fBLayer(input_dim=input_dim, sr=sr, num_filter=feat_dim, exp=exp, filter_fix=filter_fix)
        elif self.filter == 'fBPLayer':
            self.filter_layer = fBPLayer(input_dim=input_dim, sr=sr, num_filter=feat_dim, exp=exp,
                                         filter_fix=filter_fix)
        elif self.filter == 'fLLayer':
            self.filter_layer = fLLayer(input_dim=input_dim, num_filter=feat_dim, exp=exp)
        elif self.filter == 'Avg':
            self.filter_layer = nn.AvgPool2d(kernel_size=(1, 7), stride=(1, 3))
        else:
            self.filter_layer = None
        self.input_norm = input_norm
        if input_norm == 'Instance':
            self.inst_layer = Inst_Norm(input_dim)
        elif input_norm == 'Mean':
            self.inst_layer = Mean_Norm()
        else:
            self.inst_layer = None
        # Spec-augment style masking.
        # BUGFIX: the original set `self.maks_layer` (typo) for "time" and
        # overwrote `self.mask` itself for "freq", so `self.mask_layer` was
        # undefined in both branches and _forward raised AttributeError;
        # always assign self.mask_layer here.
        if self.mask == "time":
            self.mask_layer = TimeMaskLayer(mask_len=mask_len)
        elif self.mask == "freq":
            self.mask_layer = FreqMaskLayer(mask_len=mask_len)
        elif self.mask == "time_freq":
            self.mask_layer = nn.Sequential(
                TimeMaskLayer(),
                FreqMaskLayer()
            )
        else:
            self.mask_layer = None
        self.conv1 = nn.Conv2d(1, self.num_filter[0], kernel_size=kernel_size, stride=stride, padding=padding)
        self.bn1 = self._norm_layer(self.num_filter[0])
        self.relu = nn.ReLU(inplace=True)
        # 'avp*'/'mxp*' select the fast-downsampling pooling front-end.
        if self.fast.startswith('avp'):
            self.maxpool = nn.AvgPool2d(kernel_size=(3, 3), stride=(1, 2), padding=(1, 1))
        elif self.fast.startswith('mxp'):
            self.maxpool = nn.MaxPool2d(kernel_size=(3, 3), stride=(1, 2), padding=(1, 1))
        else:
            self.maxpool = None
        self.layer1 = self._make_layer(block, self.num_filter[0], layers[0])
        self.layer2 = self._make_layer(block, self.num_filter[1], layers[1], stride=2)
        self.layer3 = self._make_layer(block, self.num_filter[2], layers[2], stride=2)
        if self.fast in ['avp1', 'mxp1']:
            # keep stride 1 in the last stage when the front-end already downsampled
            self.layer4 = self._make_layer(block, self.num_filter[3], layers[3], stride=1)
        else:
            self.layer4 = self._make_layer(block, self.num_filter[3], layers[3], stride=2)
        self.gain = GAIN(time=self.input_len, freq=self.input_dim) if self.gain_layer else None
        self.dropout = nn.Dropout(self.dropout_p)
        # Pooling/encoder over the (time, freq) feature map; encoder_output feeds fc1.
        if encoder_type == 'SAP':
            self.avgpool = nn.AdaptiveAvgPool2d((None, freq_dim))
            self.encoder = SelfAttentionPooling(input_dim=self.num_filter[3] * block.expansion,
                                                hidden_dim=self.num_filter[3] * block.expansion)
            self.encoder_output = self.num_filter[3] * block.expansion
        elif encoder_type == 'SASP':
            self.avgpool = nn.AdaptiveAvgPool2d((time_dim, freq_dim))
            self.encoder = AttentionStatisticPooling(input_dim=self.num_filter[3] * block.expansion,
                                                     hidden_dim=self.num_filter[3])
            self.encoder_output = self.num_filter[3] * 2 * block.expansion
        elif encoder_type == 'STAP':
            self.avgpool = nn.AdaptiveAvgPool2d((None, freq_dim))
            self.encoder = StatisticPooling(input_dim=self.num_filter[3] * freq_dim * block.expansion)
            self.encoder_output = self.num_filter[3] * freq_dim * 2 * block.expansion
        elif encoder_type == 'ASTP':
            self.avgpool = AdaptiveStdPool2d((time_dim, freq_dim))
            self.encoder = None
            self.encoder_output = self.num_filter[3] * freq_dim * time_dim * block.expansion
        else:
            self.avgpool = nn.AdaptiveAvgPool2d((time_dim, freq_dim))
            self.encoder = None
            self.encoder_output = self.num_filter[3] * freq_dim * time_dim * block.expansion
        self.fc1 = nn.Sequential(
            nn.Linear(self.encoder_output, embedding_size),
            nn.BatchNorm1d(embedding_size)
        )
        self.alpha = alpha
        if self.alpha:
            self.l2_norm = L2_Norm(self.alpha)
        self.classifier = nn.Linear(embedding_size, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                nn.init.normal_(m.weight, mean=0., std=1.)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch, so that the residual branch
        # starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; downsample the identity when the shape changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _forward(self, x):
        # Optional front-ends: filter bank, input normalization, masking.
        if self.filter_layer is not None:
            x = self.filter_layer(x)
        if self.inst_layer is not None:
            x = self.inst_layer(x)
        if self.mask_layer is not None:
            x = self.mask_layer(x)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        if self.maxpool is not None:
            x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        if self.encoder is not None:
            x = self.encoder(x)
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        if self.alpha:
            x = self.l2_norm(x)
        logits = self.classifier(x)
        return logits, x

    # Allow for accessing forward method in a inherited class
    forward = _forward
class ResNet(nn.Module):
    """Plain ResNet over 1-channel input producing (logits, embedding) pairs.

    When resnet_size maps to a zero-length fourth stage (not present in the
    current table, but supported by forward), stage 4 is skipped and fc1 is
    sized from stage 3's channel width.
    """

    def __init__(self, resnet_size=18, embedding_size=512, block=BasicBlock,
                 channels=[64, 128, 256, 512], num_classes=1000,
                 avg_size=4, zero_init_residual=False, **kwargs):
        super(ResNet, self).__init__()
        depth_table = {10: [1, 1, 1, 1],
                       18: [2, 2, 2, 2],
                       34: [3, 4, 6, 3],
                       50: [3, 4, 6, 3],
                       101: [3, 4, 23, 3]}
        self.layers = depth_table[resnet_size]
        self.avg_size = avg_size
        self.channels = channels
        self.inplanes = self.channels[0]
        self.conv1 = nn.Conv2d(1, self.channels[0], kernel_size=5, stride=2, padding=2, bias=False)
        self.bn1 = nn.BatchNorm2d(self.channels[0])
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, self.channels[0], self.layers[0])
        self.layer2 = self._make_layer(block, self.channels[1], self.layers[1], stride=2)
        self.layer3 = self._make_layer(block, self.channels[2], self.layers[2], stride=2)
        self.layer4 = self._make_layer(block, self.channels[3], self.layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, avg_size))
        # fc1 input width depends on whether stage 4 actually runs in forward()
        fc_in = self.channels[2] if self.layers[3] == 0 else self.channels[3]
        self.fc1 = nn.Sequential(
            nn.Linear(fc_in * avg_size, embedding_size),
            nn.BatchNorm1d(embedding_size)
        )
        self.classifier = nn.Linear(embedding_size, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch, so that the residual
        # branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; downsample the identity when the shape changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        stage.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        if self.layers[3] != 0:
            x = self.layer4(x)
        x = self.avgpool(x)
        flat = x.view(x.size(0), -1)
        feat = self.fc1(flat)
        return self.classifier(feat), feat
# model = SimpleResNet(block=BasicBlock, layers=[3, 4, 6, 3])
# input = torch.torch.randn(128,1,400,64)
# x_vectors = model.pre_forward(input)
# outputs = model(x_vectors)
# print('hello')
# M. Hajibabaei and D. Dai, “Unified hypersphere embedding for speaker recognition,”
# arXiv preprint arXiv:1807.08312, 2018.
class ResNet20(nn.Module):
    """ResNet-20-style speaker network (M. Hajibabaei and D. Dai, "Unified
    hypersphere embedding for speaker recognition", arXiv:1807.08312).

    forward(x) returns (logits, feat): classifier logits and the fc1 embedding.
    """
    def __init__(self, num_classes=1000, embedding_size=128, dropout_p=0.0,
                 block=BasicBlock, input_frames=300, **kwargs):
        super(ResNet20, self).__init__()
        self.dropout_p = dropout_p
        # Stages alternate strided Block3x3 units with plain BasicBlock units.
        # self.inplanes is reset by hand before every stage because _make_layer
        # mutates it; the explicit assignments keep the channel bookkeeping visible.
        # NOTE(review): the `block` and `input_frames` parameters are accepted but
        # never used, and the fc1 input width (17 * 512) is hard-coded for a
        # specific input feature size — confirm against callers.
        self.inplanes = 1
        self.layer1 = self._make_layer(Block3x3, planes=64, blocks=1, stride=2)
        self.inplanes = 64
        self.layer2 = self._make_layer(Block3x3, planes=128, blocks=1, stride=2)
        self.inplanes = 128
        self.layer3 = self._make_layer(BasicBlock, 128, 1)
        self.inplanes = 128
        self.layer4 = self._make_layer(Block3x3, planes=256, blocks=1, stride=2)
        self.inplanes = 256
        self.layer5 = self._make_layer(BasicBlock, 256, 3)
        self.inplanes = 256
        self.layer6 = self._make_layer(Block3x3, planes=512, blocks=1, stride=2)
        self.inplanes = 512
        # Pool time down to 1; frequency axis is left variable (None).
        self.avgpool = nn.AdaptiveAvgPool2d((1, None))
        self.dropout = nn.Dropout(p=dropout_p)
        self.fc1 = nn.Sequential(
            nn.Linear(17 * self.inplanes, embedding_size),
            nn.BatchNorm1d(embedding_size)
        )
        self.classifier = nn.Linear(embedding_size, num_classes)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; downsample the identity when the shape changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = self.layer6(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        # dropout only applied when a non-zero rate was configured
        if self.dropout_p != 0:
            x = self.dropout(x)
        feat = self.fc1(x)
        logits = self.classifier(feat)
        return logits, feat
class LocalResNet(nn.Module):
    """
    ResNet trunk for speaker embeddings with A-softmax / AM-softmax style output.

    Dropout is applied after average pooling, following
    https://github.com/nagadomi/kaggle-cifar10-torch7.

    forward() returns ``(logits, embedding)``; xvector() returns
    ``("", pre-batchnorm embedding)`` for test-time extraction.
    """

    def __init__(self, embedding_size, num_classes, block_type='basic',
                 input_dim=161, input_len=300, gain_layer=False,
                 relu_type='relu', resnet_size=8, channels=[64, 128, 256], dropout_p=0., encoder_type='None',
                 input_norm=None, alpha=12, stride=2, transform=False, time_dim=1, fast=False,
                 avg_size=4, kernal_size=5, padding=2, filter=None, mask='None', mask_len=25, **kwargs):
        super(LocalResNet, self).__init__()
        # Number of residual blocks per stage for each supported depth.
        resnet_type = {8: [1, 1, 1, 0],
                       10: [1, 1, 1, 1],
                       14: [2, 2, 2, 0],
                       18: [2, 2, 2, 2],
                       34: [3, 4, 6, 3],
                       50: [3, 4, 6, 3],
                       101: [3, 4, 23, 3]}
        layers = resnet_type[resnet_size]

        if block_type == "seblock":
            block = SEBasicBlock
        elif block_type == 'cbam':
            block = CBAMBlock
        else:
            block = BasicBlock

        self.input_len = input_len
        self.input_dim = input_dim
        self.alpha = alpha
        self.layers = layers
        self.dropout_p = dropout_p
        self.transform = transform
        self.fast = fast
        self.mask = mask
        self.relu_type = relu_type
        self.embedding_size = embedding_size
        self.gain_layer = gain_layer

        if self.relu_type == 'relu6':
            self.relu = nn.ReLU6(inplace=True)
        elif self.relu_type == 'leakyrelu':
            self.relu = nn.LeakyReLU()
        elif self.relu_type == 'relu':
            self.relu = nn.ReLU(inplace=True)

        self.input_norm = input_norm
        self.filter = filter
        # Optional front-end average filter over the frequency axis.
        if self.filter == 'Avg':
            self.filter_layer = nn.AvgPool2d(kernel_size=(1, 5), stride=(1, 2))
        else:
            self.filter_layer = None

        if input_norm == 'Inst':
            self.inst_layer = Inst_Norm(self.input_len)
        elif input_norm == 'Mean':
            self.inst_layer = Mean_Norm()
        elif input_norm == 'Mstd':
            self.inst_layer = MeanStd_Norm()
        else:
            self.inst_layer = None

        # BUGFIX: the 'time' branch assigned a misspelled `self.maks_layer` and
        # the 'freq' branch overwrote `self.mask` itself, so forward() crashed
        # with a missing `mask_layer` attribute in both modes. `mask_len` is
        # now also forwarded in the 'time_freq' branch, consistent with
        # MultiResNet below.
        if self.mask == "time":
            self.mask_layer = TimeMaskLayer(mask_len=mask_len)
        elif self.mask == "freq":
            self.mask_layer = FreqMaskLayer(mask_len=mask_len)
        elif self.mask == "time_freq":
            self.mask_layer = nn.Sequential(
                TimeMaskLayer(mask_len=mask_len),
                FreqMaskLayer(mask_len=mask_len)
            )
        else:
            self.mask_layer = None

        self.inplanes = channels[0]
        self.conv1 = nn.Conv2d(1, channels[0], kernel_size=kernal_size, stride=stride, padding=padding)
        self.bn1 = nn.BatchNorm2d(channels[0])

        # BUGFIX: `fast` defaults to False (a bool), so the original
        # `self.fast.startswith('avp')` raised AttributeError on default
        # construction; only string values are tested for the 'avp' prefix.
        if isinstance(self.fast, str) and self.fast.startswith('avp'):
            self.maxpool = nn.Sequential(
                nn.Conv2d(channels[0], channels[0], kernel_size=1, stride=1),
                nn.ReLU(),
                nn.BatchNorm2d(channels[0]),
                nn.AvgPool2d(kernel_size=3, stride=2)
            )
        else:
            self.maxpool = None

        self.layer1 = self._make_layer(block, channels[0], layers[0])

        self.inplanes = channels[1]
        self.conv2 = nn.Conv2d(channels[0], channels[1], kernel_size=(5, 5), stride=2,
                               padding=padding, bias=False)
        self.bn2 = nn.BatchNorm2d(channels[1])
        self.layer2 = self._make_layer(block, channels[1], layers[1])

        self.inplanes = channels[2]
        self.conv3 = nn.Conv2d(channels[1], channels[2], kernel_size=(5, 5), stride=2,
                               padding=padding, bias=False)
        self.bn3 = nn.BatchNorm2d(channels[2])
        self.layer3 = self._make_layer(block, channels[2], layers[2])

        if layers[3] != 0:
            assert len(channels) == 4
            self.inplanes = channels[3]
            # In "fast" mode the last downsampling conv keeps stride 1.
            stride = 1 if self.fast else 2
            self.conv4 = nn.Conv2d(channels[2], channels[3], kernel_size=(5, 5), stride=stride,
                                   padding=padding, bias=False)
            self.bn4 = nn.BatchNorm2d(channels[3])
            self.layer4 = self._make_layer(block=block, planes=channels[3], blocks=layers[3])

        self.gain = GAIN(time=self.input_len, freq=self.input_dim) if self.gain_layer else None
        self.dropout = nn.Dropout(self.dropout_p)

        last_conv_chn = channels[-1]
        freq_dim = avg_size
        # Pooling/encoder head; encoder_output is the flattened size fed to fc.
        if encoder_type == 'SAP':
            self.avgpool = nn.AdaptiveAvgPool2d((None, freq_dim))
            self.encoder = SelfAttentionPooling(input_dim=last_conv_chn * freq_dim,
                                                hidden_dim=int(last_conv_chn / 2))
            self.encoder_output = last_conv_chn * freq_dim
        elif encoder_type == 'SASP':
            self.avgpool = nn.AdaptiveAvgPool2d((time_dim, freq_dim))
            self.encoder = AttentionStatisticPooling(input_dim=last_conv_chn, hidden_dim=last_conv_chn)
            self.encoder_output = last_conv_chn * 2
        elif encoder_type == 'STAP':
            self.avgpool = nn.AdaptiveAvgPool2d((None, freq_dim))
            self.encoder = StatisticPooling(input_dim=last_conv_chn * freq_dim)
            self.encoder_output = last_conv_chn * freq_dim * 2
        elif encoder_type == 'ASTP':
            self.avgpool = AdaptiveStdPool2d((time_dim, freq_dim))
            self.encoder = None
            self.encoder_output = last_conv_chn * freq_dim * time_dim
        else:
            self.avgpool = nn.AdaptiveAvgPool2d((time_dim, freq_dim))
            self.encoder = None
            self.encoder_output = last_conv_chn * freq_dim * time_dim

        self.fc = nn.Sequential(
            nn.Linear(self.encoder_output, embedding_size),
            nn.BatchNorm1d(embedding_size)
        )

        if self.transform == 'Linear':
            self.trans_layer = nn.Sequential(
                nn.Linear(embedding_size, embedding_size),
                nn.ReLU(),
                nn.BatchNorm1d(embedding_size)
            )
        elif self.transform == 'GhostVLAD':
            self.trans_layer = GhostVLAD_v2(num_clusters=8, gost=1, dim=embedding_size, normalize_input=True)
        else:
            self.trans_layer = None

        if self.alpha:
            self.l2_norm = L2_Norm(self.alpha)

        self.classifier = nn.Linear(self.embedding_size, num_classes)

        # Kaiming-normal init for convolutions; unit weight / zero bias for
        # normalization layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.GroupNorm)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of `blocks` blocks; advances self.inplanes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Identity projection when stride or channel count changes.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        if self.filter_layer is not None:
            x = self.filter_layer(x)
        if self.inst_layer is not None:
            x = self.inst_layer(x)
        if self.mask_layer is not None:
            x = self.mask_layer(x)

        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        if self.maxpool is not None:
            x = self.maxpool(x)
        x = self.layer1(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer2(x)

        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.layer3(x)

        if self.layers[3] != 0:
            x = self.conv4(x)
            x = self.bn4(x)
            x = self.relu(x)
            x = self.layer4(x)

        if self.dropout_p > 0:
            x = self.dropout(x)

        x = self.avgpool(x)
        if self.encoder is not None:
            x = self.encoder(x)

        x = x.view(x.size(0), -1)
        x = self.fc(x)

        if self.trans_layer is not None:
            x = self.trans_layer(x)
        if self.alpha:
            x = self.l2_norm(x)

        logits = self.classifier(x)
        return logits, x

    def xvector(self, x):
        """Embedding extraction: same trunk as forward(), but returns the
        pre-BatchNorm output of the fc head instead of classifier logits."""
        if self.filter_layer is not None:
            x = self.filter_layer(x)
        if self.inst_layer is not None:
            x = self.inst_layer(x)
        if self.mask_layer is not None:
            x = self.mask_layer(x)

        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        # BUGFIX: this previously gated on `self.fast`, which could call a
        # None maxpool (`fast` may be truthy while maxpool is None); match
        # forward() and test the layer itself.
        if self.maxpool is not None:
            x = self.maxpool(x)
        x = self.layer1(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer2(x)

        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.layer3(x)

        if self.layers[3] != 0:
            x = self.conv4(x)
            x = self.bn4(x)
            x = self.relu(x)
            x = self.layer4(x)

        if self.dropout_p > 0:
            x = self.dropout(x)

        x = self.avgpool(x)
        if self.encoder is not None:
            x = self.encoder(x)

        x = x.view(x.size(0), -1)
        # Only the Linear part of fc: the embedding before BatchNorm1d.
        embeddings = self.fc[0](x)

        return "", embeddings
# previous version, kept for testing
# class LocalResNet(nn.Module):
# """
# Define the ResNet model with A-softmax and AM-softmax loss.
# Added dropout as https://github.com/nagadomi/kaggle-cifar10-torch7 after average pooling and fc layer.
# """
#
# def __init__(self, embedding_size, num_classes,
# input_dim=161, block=BasicBlock,
# resnet_size=8, channels=[64, 128, 256], dropout_p=0.,
# inst_norm=False, alpha=12, stride=2, transform=False,
# avg_size=4, kernal_size=5, padding=2, **kwargs):
#
# super(LocalResNet, self).__init__()
# resnet_type = {8: [1, 1, 1, 0],
# 10: [1, 1, 1, 1],
# 18: [2, 2, 2, 2],
# 34: [3, 4, 6, 3],
# 50: [3, 4, 6, 3],
# 101: [3, 4, 23, 3]}
#
# layers = resnet_type[resnet_size]
# self.alpha = alpha
# self.layers = layers
# self.dropout_p = dropout_p
# self.transform = transform
#
# self.embedding_size = embedding_size
# # self.relu = nn.LeakyReLU()
# self.relu = nn.ReLU(inplace=True)
# self.inst_norm = inst_norm
# self.inst_layer = nn.InstanceNorm1d(input_dim)
#
# self.inplanes = channels[0]
# self.conv1 = nn.Conv2d(1, channels[0], kernel_size=(5, 5), stride=stride, padding=(3, 2))
# self.bn1 = nn.BatchNorm2d(channels[0])
# self.maxpool = nn.MaxPool2d(kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
#
# self.layer1 = self._make_layer(block, channels[0], layers[0])
#
# self.inplanes = channels[1]
# self.conv2 = nn.Conv2d(channels[0], channels[1], kernel_size=kernal_size, stride=2,
# padding=padding, bias=False)
# self.bn2 = nn.BatchNorm2d(channels[1])
# self.layer2 = self._make_layer(block, channels[1], layers[1])
#
# self.inplanes = channels[2]
# self.conv3 = nn.Conv2d(channels[1], channels[2], kernel_size=kernal_size, stride=2,
# padding=padding, bias=False)
# self.bn3 = nn.BatchNorm2d(channels[2])
# self.layer3 = self._make_layer(block, channels[2], layers[2])
#
# if layers[3] != 0:
# assert len(channels) == 4
# self.inplanes = channels[3]
# self.conv4 = nn.Conv2d(channels[2], channels[3], kernel_size=kernal_size, stride=2,
# padding=padding, bias=False)
# self.bn4 = nn.BatchNorm2d(channels[3])
# self.layer4 = self._make_layer(block=block, planes=channels[3], blocks=layers[3])
#
# self.dropout = nn.Dropout(self.dropout_p)
# self.avg_pool = nn.AdaptiveAvgPool2d((1, avg_size))
#
# self.fc = nn.Sequential(
# nn.Linear(self.inplanes * avg_size, embedding_size),
# nn.BatchNorm1d(embedding_size)
# )
#
# if self.transform:
# self.trans_layer = nn.Sequential(
# nn.Linear(embedding_size, embedding_size, bias=False),
# nn.BatchNorm1d(embedding_size),
# nn.ReLU()
# )
#
# # self.fc = nn.Linear(self.inplanes * avg_size, embedding_size)
# self.classifier = nn.Linear(self.embedding_size, num_classes)
#
# for m in self.modules(): # 对于各层参数的初始化
# if isinstance(m, nn.Conv2d): # 以2/n的开方为标准差,做均值为0的正态分布
# # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# # m.weight.data.normal_(0, math.sqrt(2. / n))
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.GroupNorm)): # weight设置为1,bias为0
# m.weight.data.fill_(1)
# m.bias.data.zero_()
#
# def l2_norm(self, input, alpha=1.0):
# # alpha = log(p * ( class -2) / (1-p))
# input_size = input.size()
# buffer = torch.pow(input, 2)
#
# normp = torch.sum(buffer, 1).add_(1e-12)
# norm = torch.sqrt(normp)
#
# _output = torch.div(input, norm.view(-1, 1).expand_as(input))
# output = _output.view(input_size)
# # # # input = input.renorm(p=2, dim=1, maxnorm=1.0)
# # norm = input.norm(p=2, dim=1, keepdim=True).add(1e-14)
# # output = input / norm
#
# return output * alpha
#
# def _make_layer(self, block, planes, blocks, stride=1):
# downsample = None
# if stride != 1 or self.inplanes != planes * block.expansion:
# downsample = nn.Sequential(
# conv1x1(self.inplanes, planes * block.expansion, stride),
# nn.BatchNorm2d(planes * block.expansion),
# )
#
# layers = []
# layers.append(block(self.inplanes, planes, stride, downsample))
# self.inplanes = planes * block.expansion
# for _ in range(1, blocks):
# layers.append(block(self.inplanes, planes))
#
# return nn.Sequential(*layers)
#
# def forward(self, x):
# if self.inst_norm:
# x = x.squeeze(1).transpose(1, 2)
# x = self.inst_layer(x)
# x = x.transpose(1, 2).unsqueeze(1)
#
# # x = x - torch.mean(x, dim=-2, keepdim=True)
#
# x = self.conv1(x)
# x = self.bn1(x)
# x = self.relu(x)
# x = self.maxpool(x)
#
# x = self.layer1(x)
#
# x = self.conv2(x)
# x = self.bn2(x)
# x = self.relu(x)
# x = self.layer2(x)
#
# x = self.conv3(x)
# x = self.bn3(x)
# x = self.relu(x)
# x = self.layer3(x)
#
# if self.layers[3] != 0:
# x = self.conv4(x)
# x = self.bn4(x)
# x = self.relu(x)
# x = self.layer4(x)
#
# if self.dropout_p > 0:
# x = self.dropout(x)
#
# # if self.statis_pooling:
# # mean_x = self.avg_pool(x)
# # mean_x = mean_x.view(mean_x.size(0), -1)
# #
# # std_x = self.std_pool(x)
# # std_x = std_x.view(std_x.size(0), -1)
# #
# # x = torch.cat((mean_x, std_x), dim=1)
# #
# # else:
# # print(x.shape)
# x = self.avg_pool(x)
# x = x.view(x.size(0), -1)
#
# x = self.fc(x)
# if self.transform == True:
# x += self.trans_layer(x)
# t_x = self.trans_layer(x)
# x = t_x + x
#
# if self.alpha:
# x = self.l2_norm(x, alpha=self.alpha)
#
# logits = self.classifier(x)
#
# return logits, x
class DomainNet(nn.Module):
    """Wraps a speaker-embedding model with an auxiliary domain classifier.

    The wrapped model's embedding is classified into domains; the domain
    logits are concatenated back onto the embedding and re-projected before
    the final speaker classification. forward() returns a 4-tuple of logits
    (original speaker, refined speaker, domain, refined domain) plus the
    refined embedding.
    """

    def __init__(self, model, embedding_size, num_classes_a, num_classes_b, **kwargs):
        super(DomainNet, self).__init__()
        self.xvectors = model
        self.embedding_size = embedding_size
        # Gradient-reversal layer (currently unused in forward()).
        self.grl = GRL(lambda_=0.)

        half = int(self.embedding_size / 2)
        self.classifier_dom = nn.Sequential(
            nn.Linear(self.embedding_size, half),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(half),
            nn.Linear(half, num_classes_b),
        )
        self.fc2 = nn.Sequential(
            nn.Linear(int(num_classes_b + self.embedding_size), self.embedding_size),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(self.embedding_size)
        )
        self.classifier_spk = nn.Linear(self.embedding_size, num_classes_a)

        # Kaiming-normal init for convolutions; unit weight / zero bias for norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.GroupNorm)):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def forward(self, x):
        logits, embeddings = self.xvectors(x)

        dom_logits = self.classifier_dom(embeddings)

        # Fuse embedding with domain evidence and re-project.
        fused = torch.cat((embeddings, dom_logits), dim=1)
        fused = self.fc2(fused)

        spk_logits_new = self.classifier_spk(fused)
        dom_logits_new = self.classifier_dom(fused)

        return (logits, spk_logits_new, dom_logits, dom_logits_new), fused
class InstBlock3x3(nn.Module):
    """Residual block of three 3x3 convolutions normalized with InstanceNorm."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(InstBlock3x3, self).__init__()
        self.conv1 = conv3x3(inplanes, planes)
        self.bn1 = nn.InstanceNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.InstanceNorm2d(planes)
        self.conv3 = conv3x3(planes, planes)
        self.bn3 = nn.InstanceNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        # Project the identity when the shape changes (stride/channel mismatch).
        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class VarSizeConv(nn.Module):
    """Parallel 3x3 / 5x5 / 7x7 convolutions (each followed by InstanceNorm)
    whose outputs are concatenated along the channel axis (3 * planes channels)."""

    def __init__(self, inplanes, planes, stride=1, kernel_size=[3, 5, 7]):
        super(VarSizeConv, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=kernel_size[0], stride=stride, padding=1)
        self.bn1 = nn.InstanceNorm2d(planes)
        self.conv2 = nn.Conv2d(inplanes, planes, kernel_size=kernel_size[1], stride=stride, padding=2)
        self.bn2 = nn.InstanceNorm2d(planes)
        self.conv3 = nn.Conv2d(inplanes, planes, kernel_size=kernel_size[2], stride=stride, padding=3)
        self.bn3 = nn.InstanceNorm2d(planes)

    def forward(self, x):
        x1 = self.conv1(x)
        x1 = self.bn1(x1)

        x2 = self.conv2(x)
        x2 = self.bn2(x2)

        x3 = self.conv3(x)
        x3 = self.bn3(x3)

        return torch.cat([x1, x2, x3], dim=1)


class GradResNet(nn.Module):
    """
    Define the ResNet model with A-softmax and AM-softmax loss.
    Added dropout as https://github.com/nagadomi/kaggle-cifar10-torch7 after average pooling and fc layer.

    NOTE(review): this region contained an unresolved git merge conflict.
    Resolution: the Server-side `transform` branch stays inside this class
    (forward() uses self.trans_layer), the HEAD-side helper classes
    InstBlock3x3/VarSizeConv are hoisted above (VarSizeConv is needed by the
    `ince` option), and HEAD's truncated duplicate of ResNet20 was dropped
    because ResNet20 is already defined earlier in this file.
    """

    def __init__(self, embedding_size, num_classes, block=BasicBlock, input_dim=161,
                 resnet_size=8, channels=[64, 128, 256], dropout_p=0., ince=False, transform=False,
                 inst_norm=False, alpha=12, vad=False, avg_size=4, kernal_size=5, padding=2, **kwargs):
        super(GradResNet, self).__init__()
        # Number of residual blocks per stage for each supported depth.
        resnet_type = {8: [1, 1, 1, 0],
                       10: [1, 1, 1, 1],
                       18: [2, 2, 2, 2],
                       34: [3, 4, 6, 3],
                       50: [3, 4, 6, 3],
                       101: [3, 4, 23, 3]}
        layers = resnet_type[resnet_size]
        self.ince = ince
        self.alpha = alpha
        self.layers = layers
        self.dropout_p = dropout_p
        self.transform = transform
        self.embedding_size = embedding_size
        # self.relu = nn.LeakyReLU()
        self.relu = nn.ReLU(inplace=True)

        self.vad = vad
        if self.vad:
            self.vad_layer = SelfVadPooling(input_dim)
        self.inst_norm = inst_norm
        # self.inst_layer = nn.InstanceNorm1d(input_dim)

        if self.ince:
            self.pre_conv = VarSizeConv(1, 1)
            # NOTE(review): VarSizeConv(1, 1) concatenates 3 channels but this
            # conv expects 4 — confirm whether the raw input is meant to be
            # concatenated as a 4th channel before conv1.
            self.conv1 = nn.Conv2d(4, channels[0], kernel_size=5, stride=2, padding=2)
        else:
            self.conv1 = nn.Conv2d(1, channels[0], kernel_size=5, stride=2, padding=2)
        self.bn1 = nn.BatchNorm2d(channels[0])
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)  # not applied in forward()

        self.inplanes = channels[0]
        self.layer1 = self._make_layer(block, channels[0], layers[0])

        self.inplanes = channels[1]
        self.conv2 = nn.Conv2d(channels[0], channels[1], kernel_size=kernal_size,
                               stride=2, padding=padding, bias=False)
        self.bn2 = nn.BatchNorm2d(channels[1])
        self.layer2 = self._make_layer(block, channels[1], layers[1])

        self.inplanes = channels[2]
        self.conv3 = nn.Conv2d(channels[1], channels[2], kernel_size=kernal_size,
                               stride=2, padding=padding, bias=False)
        self.bn3 = nn.BatchNorm2d(channels[2])
        self.layer3 = self._make_layer(block, channels[2], layers[2])

        if layers[3] != 0:
            assert len(channels) == 4
            self.inplanes = channels[3]
            self.conv4 = nn.Conv2d(channels[2], channels[3], kernel_size=kernal_size, stride=2,
                                   padding=padding, bias=False)
            self.bn4 = nn.BatchNorm2d(channels[3])
            self.layer4 = self._make_layer(block=block, planes=channels[3], blocks=layers[3])

        self.dropout = nn.Dropout(self.dropout_p)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, avg_size))

        if self.transform:
            self.trans_layer = nn.Sequential(
                nn.Linear(embedding_size, embedding_size, bias=False),
                nn.BatchNorm1d(embedding_size),
                nn.ReLU()
            )

        self.fc = nn.Sequential(
            nn.Linear(self.inplanes * avg_size, embedding_size),
            nn.BatchNorm1d(embedding_size)
        )
        # self.fc = nn.Linear(self.inplanes * avg_size, embedding_size)
        self.classifier = nn.Linear(self.embedding_size, num_classes)

        # Kaiming-normal init for convolutions; unit weight / zero bias for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.GroupNorm)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def l2_norm(self, input, alpha=1.0):
        """L2-normalize each row of `input`, then scale by `alpha`
        (alpha = log(p * (class - 2) / (1 - p)))."""
        input_size = input.size()
        buffer = torch.pow(input, 2)

        normp = torch.sum(buffer, 1).add_(1e-12)
        norm = torch.sqrt(normp)

        _output = torch.div(input, norm.view(-1, 1).expand_as(input))
        output = _output.view(input_size)

        return output * alpha

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of `blocks` blocks; advances self.inplanes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Identity projection when stride or channel count changes.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        if self.vad:
            x = self.vad_layer(x)
            x = torch.log(x)
        if self.inst_norm:
            # Per-utterance mean normalization over the time axis.
            # x = self.inst_layer(x)
            x = x - torch.mean(x, dim=-2, keepdim=True)
        if self.ince:
            x = self.pre_conv(x)

        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        # x = self.maxpool(x)
        x = self.layer1(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer2(x)

        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.layer3(x)

        if self.layers[3] != 0:
            x = self.conv4(x)
            x = self.bn4(x)
            x = self.relu(x)
            x = self.layer4(x)

        if self.dropout_p > 0:
            x = self.dropout(x)

        x = self.avg_pool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        if self.transform:
            t_x = self.trans_layer(x)
            x = t_x + x

        if self.alpha:
            x = self.l2_norm(x, alpha=self.alpha)

        logits = self.classifier(x)
        return logits, x
class TimeFreqResNet(nn.Module):
    """
    ResNet variant whose downsampling convolutions are factorized into separate
    time-axis (kx1) and frequency-axis (1xk) passes.

    NOTE(review): this class contained unresolved git merge-conflict markers;
    the Server-side branch (vad/ince options, factorized conv1, square
    maxpool) was kept because the shared code below reads self.vad and
    self.ince, which only that branch defines.
    """

    def __init__(self, embedding_size, num_classes, block=BasicBlock, input_dim=161,
                 resnet_size=8, channels=[64, 128, 256], dropout_p=0., ince=False,
                 inst_norm=False, alpha=12, vad=False, avg_size=4, kernal_size=5, padding=2, **kwargs):
        super(TimeFreqResNet, self).__init__()
        # Number of residual blocks per stage for each supported depth.
        resnet_type = {8: [1, 1, 1, 0],
                       10: [1, 1, 1, 1],
                       18: [2, 2, 2, 2],
                       34: [3, 4, 6, 3],
                       50: [3, 4, 6, 3],
                       101: [3, 4, 23, 3]}
        layers = resnet_type[resnet_size]
        self.ince = ince
        self.alpha = alpha
        self.layers = layers
        self.dropout_p = dropout_p
        self.embedding_size = embedding_size
        # self.relu = nn.LeakyReLU()
        self.relu = nn.ReLU(inplace=True)

        self.vad = vad
        if self.vad:
            self.vad_layer = SelfVadPooling(input_dim)
        self.inst_norm = inst_norm

        # BUGFIX: forward() applies self.pre_conv when ince=True, but the layer
        # was never constructed; mirror GradResNet. NOTE(review): VarSizeConv
        # emits 3 channels while conv1 expects 1 — confirm intended usage
        # before enabling ince.
        if self.ince:
            self.pre_conv = VarSizeConv(1, 1)

        # Factorized stem: time-axis (5x1) conv, BN, then frequency-axis (1x5) conv.
        self.conv1 = nn.Sequential(nn.Conv2d(1, channels[0], kernel_size=(5, 1), stride=(2, 1), padding=(2, 0)),
                                   nn.BatchNorm2d(channels[0]),
                                   nn.Conv2d(channels[0], channels[0], kernel_size=(1, 5), stride=(1, 2),
                                             padding=(0, 2)),
                                   )
        self.bn1 = nn.BatchNorm2d(channels[0])
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.inplanes = channels[0]
        self.layer1 = self._make_layer(block, channels[0], layers[0])

        self.inplanes = channels[1]
        self.conv2 = nn.Sequential(
            nn.Conv2d(channels[0], channels[1], kernel_size=(5, 1), stride=(2, 1), padding=(2, 0)),
            nn.BatchNorm2d(channels[1]),
            nn.Conv2d(channels[1], channels[1], kernel_size=(1, 5), stride=(1, 2),
                      padding=(0, 2)),
        )
        self.bn2 = nn.BatchNorm2d(channels[1])
        self.layer2 = self._make_layer(block, channels[1], layers[1])

        self.inplanes = channels[2]
        self.conv3 = nn.Sequential(
            nn.Conv2d(channels[1], channels[2], kernel_size=(5, 1), stride=(2, 1), padding=(2, 0)),
            nn.BatchNorm2d(channels[2]),
            nn.Conv2d(channels[2], channels[2], kernel_size=(1, 5), stride=(1, 2), padding=(0, 2)),
        )
        self.bn3 = nn.BatchNorm2d(channels[2])
        self.layer3 = self._make_layer(block, channels[2], layers[2])

        if layers[3] != 0:
            assert len(channels) == 4
            self.inplanes = channels[3]
            self.conv4 = nn.Conv2d(channels[2], channels[3], kernel_size=kernal_size, stride=2,
                                   padding=padding, bias=False)
            self.bn4 = nn.BatchNorm2d(channels[3])
            self.layer4 = self._make_layer(block=block, planes=channels[3], blocks=layers[3])

        self.dropout = nn.Dropout(self.dropout_p)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, avg_size))

        self.fc = nn.Sequential(
            nn.Linear(self.inplanes * avg_size, embedding_size),
            nn.BatchNorm1d(embedding_size)
        )
        # self.fc = nn.Linear(self.inplanes * avg_size, embedding_size)
        self.classifier = nn.Linear(self.embedding_size, num_classes)

        # Kaiming-normal init for convolutions; unit weight / zero bias for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.GroupNorm)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def l2_norm(self, input, alpha=1.0):
        """L2-normalize each row of `input`, then scale by `alpha`
        (alpha = log(p * (class - 2) / (1 - p)))."""
        input_size = input.size()
        buffer = torch.pow(input, 2)
        normp = torch.sum(buffer, 1).add_(1e-12)
        norm = torch.sqrt(normp)
        _output = torch.div(input, norm.view(-1, 1).expand_as(input))
        output = _output.view(input_size)
        return output * alpha

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of `blocks` blocks; advances self.inplanes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Identity projection when stride or channel count changes.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        if self.vad:
            x = self.vad_layer(x)
            x = torch.log(x)
        if self.inst_norm:
            # Per-utterance mean normalization over the time axis.
            x = x - torch.mean(x, dim=-2, keepdim=True)
        if self.ince:
            x = self.pre_conv(x)

        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer2(x)

        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.layer3(x)

        if self.layers[3] != 0:
            x = self.conv4(x)
            x = self.bn4(x)
            x = self.relu(x)
            x = self.layer4(x)

        if self.dropout_p > 0:
            x = self.dropout(x)

        x = self.avg_pool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        if self.alpha:
            # BUGFIX: was `F.self.l2_norm(...)`, which raises AttributeError;
            # call the instance method directly.
            x = self.l2_norm(x, alpha=self.alpha)

        logits = self.classifier(x)
        return logits, x
<<<<<<< HEAD
class DomainResNet(nn.Module):
=======
class MultiResNet(nn.Module):
>>>>>>> Server/Server
"""
Define the ResNet model with A-softmax and AM-softmax loss.
Added dropout as https://github.com/nagadomi/kaggle-cifar10-torch7 after average pooling and fc layer.
"""
<<<<<<< HEAD
def __init__(self, embedding_size_a, embedding_size_b, embedding_size_o,
num_classes_a, num_classes_b,
block=BasicBlock, input_dim=161,
resnet_size=8, channels=[64, 128, 256], dropout_p=0.,
inst_norm=False, alpha=12,
avg_size=4, kernal_size=5, padding=2, **kwargs):
=======
def __init__(self, embedding_size, num_classes_a, num_classes_b, block=BasicBlock, input_dim=161,
resnet_size=8, channels=[64, 128, 256], dropout_p=0., stride=2, fast=False,
inst_norm=False, alpha=12, input_norm='None', transform=False,
avg_size=4, kernal_size=5, padding=2, mask='None', mask_len=25, **kwargs):
>>>>>>> Server/Server
super(MultiResNet, self).__init__()
resnet_type = {8: [1, 1, 1, 0],
10: [1, 1, 1, 1],
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3]}
layers = resnet_type[resnet_size]
self.alpha = alpha
self.layers = layers
self.dropout_p = dropout_p
self.embedding_size = embedding_size
self.relu = nn.ReLU(inplace=True)
<<<<<<< HEAD
self.inst_norm = inst_norm
# self.inst_layer = nn.InstanceNorm1d(input_dim)
=======
self.transform = transform
self.fast = fast
self.input_norm = input_norm
self.mask = mask
if input_norm == 'Instance':
self.inst_layer = nn.InstanceNorm1d(input_dim)
elif input_norm == 'Mean':
self.inst_layer = Mean_Norm()
elif input_norm == 'MeanStd':
self.inst_layer = MeanStd_Norm()
else:
self.inst_layer = None
if self.mask == "time":
self.maks_layer = TimeMaskLayer(mask_len=mask_len)
elif self.mask == "freq":
self.mask_layer = FreqMaskLayer(mask_len=mask_len)
elif self.mask == "time_freq":
self.mask_layer = nn.Sequential(
TimeMaskLayer(mask_len=mask_len),
FreqMaskLayer(mask_len=mask_len)
)
else:
self.mask_layer = None
>>>>>>> Server/Server
self.inplanes = channels[0]
self.conv1 = nn.Conv2d(1, channels[0], kernel_size=5, stride=stride, padding=2, bias=False)
self.bn1 = nn.BatchNorm2d(channels[0])
<<<<<<< HEAD
self.maxpool = nn.MaxPool2d(kernel_size=(3, 1), stride=(2, 1), padding=1)
=======
# fast v3
if self.fast:
self.maxpool = nn.Sequential(
nn.Conv2d(channels[0], channels[0], kernel_size=1, stride=1),
nn.ReLU(),
nn.BatchNorm2d(channels[0]),
nn.AvgPool2d(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
)
else:
self.maxpool = None
# self.maxpool = nn.MaxPool2d(kernel_size=(3, 1), stride=(2, 1), padding=1)
>>>>>>> Server/Server
self.layer1 = self._make_layer(block, channels[0], layers[0])
self.inplanes = channels[1]
self.conv2 = nn.Conv2d(channels[0], channels[1], kernel_size=kernal_size, stride=2,
padding=padding, bias=False)
self.bn2 = nn.BatchNorm2d(channels[1])
self.layer2 = self._make_layer(block, channels[1], layers[1])
self.inplanes = channels[2]
self.conv3 = nn.Conv2d(channels[1], channels[2], kernel_size=kernal_size, stride=2,
padding=padding, bias=False)
self.bn3 = nn.BatchNorm2d(channels[2])
self.layer3 = self._make_layer(block, channels[2], layers[2])
if layers[3] != 0:
assert len(channels) == 4
self.inplanes = channels[3]
self.conv4 = nn.Conv2d(channels[2], channels[3], kernel_size=kernal_size, stride=2,
padding=padding, bias=False)
self.bn4 = nn.BatchNorm2d(channels[3])
self.layer4 = self._make_layer(block=block, planes=channels[3], blocks=layers[3])
self.dropout = nn.Dropout(self.dropout_p)
self.avg_pool = nn.AdaptiveAvgPool2d((avg_size, 1))
# self.encoder = nn.LSTM(input_size=channels[2],
# hidden_size=channels[2],
# num_layers=1,
# batch_first=True,
# dropout=self.dropout_p)
self.fc = nn.Sequential(
nn.Linear(self.inplanes * avg_size, self.embedding_size),
nn.BatchNorm1d(self.embedding_size)
)
if self.transform == 'Linear':
self.trans_layer = nn.Sequential(
nn.Linear(embedding_size, embedding_size),
nn.ReLU(),
nn.BatchNorm1d(embedding_size))
elif self.transform == 'GhostVLAD':
self.trans_layer = GhostVLAD_v2(num_clusters=8, gost=1, dim=embedding_size, normalize_input=True)
else:
self.trans_layer = None
if self.alpha:
self.l2_norm = L2_Norm(self.alpha)
<<<<<<< HEAD
self.classifier_spk = nn.Linear(self.embedding_size_a, num_classes_a)
self.grl = GRL(lambda_=0.)
self.classifier_dom = nn.Sequential(nn.Linear(self.embedding_size_b, int(self.embedding_size_b / 4)),
nn.ReLU(inplace=True),
nn.Linear(int(self.embedding_size_b / 4), num_classes_b),
)
=======
self.classifier_a = nn.Linear(self.embedding_size, num_classes_a)
self.classifier_b = nn.Linear(self.embedding_size, num_classes_b)
>>>>>>> Server/Server
for m in self.modules(): # 对于各层参数的初始化
if isinstance(m, nn.Conv2d): # 以2/n的开方为标准差,做均值为0的正态分布
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.GroupNorm)): # weight设置为1,bias为0
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
<<<<<<< HEAD
if self.inst_norm:
# x = x.squeeze(1)
# x = self.inst_layer(x)
# x = x.unsqueeze(1)
x = x - torch.mean(x, dim=-2, keepdim=True)
=======
tuple_input = False
if isinstance(x, tuple):
tuple_input = True
size_a = len(x[0])
x = torch.cat(x, dim=0)
if self.inst_layer != None:
x = self.inst_layer(x)
if self.mask_layer != None:
x = self.mask_layer(x)
>>>>>>> Server/Server
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
<<<<<<< HEAD
x = self.maxpool(x)
=======
if self.maxpool != None:
x = self.maxpool(x)
>>>>>>> Server/Server
x = self.layer1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.layer2(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.layer3(x)
if self.layers[3] != 0:
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = self.layer4(x)
if self.dropout_p > 0:
x = self.dropout(x)
# x = self.avg_pool(x).transpose(1, 2)
# x, (_, _) = self.encoder(x.squeeze(1))
# x = x[:, -1]
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
embeddings = self.fc(x)
if self.trans_layer != None:
embeddings = self.trans_layer(embeddings)
if self.alpha:
embeddings = self.l2_norm(embeddings)
# embeddings = self.l2_norm(embeddings, alpha=self.alpha)
<<<<<<< HEAD
spk_logits = self.classifier_spk(spk_x)
dom_x = self.grl(dom_x)
dom_logits = self.classifier_dom(dom_x)
return spk_logits, spk_x, dom_logits, dom_x
class GradResNet(nn.Module):
"""
Define the ResNet model with A-softmax and AM-softmax loss.
Added dropout as https://github.com/nagadomi/kaggle-cifar10-torch7 after average pooling and fc layer.
"""
def __init__(self, embedding_size, num_classes, block=BasicBlock, input_dim=161,
resnet_size=8, channels=[64, 128, 256], dropout_p=0., ince=False,
inst_norm=False, alpha=12, vad=False, avg_size=4, kernal_size=5, padding=2, **kwargs):
super(GradResNet, self).__init__()
resnet_type = {8: [1, 1, 1, 0],
10: [1, 1, 1, 1],
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3]}
layers = resnet_type[resnet_size]
self.ince = ince
self.alpha = alpha
self.layers = layers
self.dropout_p = dropout_p
self.embedding_size = embedding_size
# self.relu = nn.LeakyReLU()
self.relu = nn.ReLU(inplace=True)
self.vad = vad
if self.vad:
self.vad_layer = SelfVadPooling(input_dim)
self.inst_norm = inst_norm
# self.inst_layer = nn.InstanceNorm1d(input_dim)
if self.ince:
self.pre_conv = VarSizeConv(1, 1)
self.conv1 = nn.Conv2d(3, channels[0], kernel_size=5, stride=2, padding=2)
else:
self.conv1 = nn.Conv2d(1, channels[0], kernel_size=5, stride=2, padding=2)
self.bn1 = nn.BatchNorm2d(channels[0])
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.inplanes = channels[0]
self.layer1 = self._make_layer(block, channels[0], layers[0])
self.inplanes = channels[1]
self.conv2 = nn.Conv2d(channels[0], channels[1], kernel_size=kernal_size,
stride=2, padding=padding, bias=False)
self.bn2 = nn.BatchNorm2d(channels[1])
self.layer2 = self._make_layer(block, channels[1], layers[1])
self.inplanes = channels[2]
self.conv3 = nn.Conv2d(channels[1], channels[2], kernel_size=kernal_size,
stride=2, padding=padding, bias=False)
self.bn3 = nn.BatchNorm2d(channels[2])
self.layer3 = self._make_layer(block, channels[2], layers[2])
if layers[3] != 0:
assert len(channels) == 4
self.inplanes = channels[3]
self.conv4 = nn.Conv2d(channels[2], channels[3], kernel_size=kernal_size, stride=2,
padding=padding, bias=False)
self.bn4 = nn.BatchNorm2d(channels[3])
self.layer4 = self._make_layer(block=block, planes=channels[3], blocks=layers[3])
self.dropout = nn.Dropout(self.dropout_p)
self.avg_pool = nn.AdaptiveAvgPool2d((1, avg_size))
self.fc = nn.Sequential(
nn.Linear(self.inplanes * avg_size, embedding_size),
nn.BatchNorm1d(embedding_size)
)
# self.fc = nn.Linear(self.inplanes * avg_size, embedding_size)
self.classifier = nn.Linear(self.embedding_size, num_classes)
for m in self.modules(): # 对于各层参数的初始化
if isinstance(m, nn.Conv2d): # 以2/n的开方为标准差,做均值为0的正态分布
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.GroupNorm)): # weight设置为1,bias为0
m.weight.data.fill_(1)
m.bias.data.zero_()
def l2_norm(self, input, alpha=1.0):
# alpha = log(p * (class -2) / (1-p))
input_size = input.size()
buffer = torch.pow(input, 2)
normp = torch.sum(buffer, 1).add_(1e-12)
norm = torch.sqrt(normp)
_output = torch.div(input, norm.view(-1, 1).expand_as(input))
output = _output.view(input_size)
# # # input = input.renorm(p=2, dim=1, maxnorm=1.0)
# norm = input.norm(p=2, dim=1, keepdim=True).add(1e-14)
# output = input / norm
return output * alpha
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
if self.vad:
x = self.vad_layer(x)
x = torch.log(x)
if self.inst_norm:
# x = self.inst_layer(x)
x = x - torch.mean(x, dim=-2, keepdim=True)
if self.ince:
x = self.pre_conv(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
# x = self.maxpool(x)
x = self.layer1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.layer2(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.layer3(x)
if self.layers[3] != 0:
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = self.layer4(x)
if self.dropout_p > 0:
x = self.dropout(x)
# if self.statis_pooling:
# mean_x = self.avg_pool(x)
# mean_x = mean_x.view(mean_x.size(0), -1)
#
# std_x = self.std_pool(x)
# std_x = std_x.view(std_x.size(0), -1)
#
# x = torch.cat((mean_x, std_x), dim=1)
#
# else:
# print(x.shape)
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
if self.alpha:
x = self.l2_norm(x, alpha=self.alpha)
logits = self.classifier(x)
return logits, x
=======
if tuple_input:
embeddings_a = embeddings[:size_a]
embeddings_b = embeddings[size_a:]
logits_a = self.classifier_a(embeddings_a)
logits_b = self.classifier_b(embeddings_b)
return (logits_a, logits_b), (embeddings_a, embeddings_b)
else:
return '', embeddings
# def cls_forward(self, a, b):
#
# logits_a = self.classifier_a(a)
# logits_b = self.classifier_b(b)
#
# return logits_a, logits_b
>>>>>>> Server/Server
| 35.4141
| 120
| 0.564015
| 11,094
| 87,402
| 4.301154
| 0.039931
| 0.0241
| 0.017352
| 0.015843
| 0.88503
| 0.856571
| 0.833707
| 0.815139
| 0.798961
| 0.786512
| 0
| 0.040512
| 0.308917
| 87,402
| 2,467
| 121
| 35.428456
| 0.749478
| 0
| 0
| 0.7737
| 0
| 0
| 0.009895
| 0.000772
| 0
| 0
| 0
| 0
| 0.00367
| 0
| null | null | 0
| 0.007339
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
daaab5fe1c32b327e94ac561104c3ed2d8752efa
| 12,337
|
py
|
Python
|
jams/srrasa.py
|
MuellerSeb/jams_python
|
1bca04557da79d8f8a4c447f5ccc517c40ab7dfc
|
[
"MIT"
] | 9
|
2019-06-03T03:24:16.000Z
|
2021-12-03T07:14:00.000Z
|
jams/srrasa.py
|
MuellerSeb/jams_python
|
1bca04557da79d8f8a4c447f5ccc517c40ab7dfc
|
[
"MIT"
] | 6
|
2020-03-25T21:56:59.000Z
|
2021-11-08T14:58:27.000Z
|
jams/srrasa.py
|
MuellerSeb/jams_python
|
1bca04557da79d8f8a4c447f5ccc517c40ab7dfc
|
[
"MIT"
] | 5
|
2019-10-17T12:04:33.000Z
|
2021-09-28T07:45:07.000Z
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
def srrasa(xy, strata=5, n=3, plot=False):
    """
    Generates stratified random 2D points within a given rectangular area.

    Definition
    ----------
    def srrasa(xy, strata=5, n=3, plot=False):

    Input
    -----
    xy         list of floats (4), x and y coordinates enclosing the
               designated rectangle in the form [x1,x2,y1,y2]

    Optional Input
    --------------
    strata     int, number of strata per axis
    n          int, number of random points in each stratum
    plot       bool, if True, strata and points are plotted, otherwise not

    Output
    ------
    rand_xy    ndarray (strata**2*n, 2), x and y coordinates of the
               stratified random points in the given rectangle.

    Examples
    --------
    >>> # seed for reproducible results in doctest
    >>> np.random.seed(1)
    >>> # gives within the rectangle of the given coordinates
    >>> # 16 (4**2) stratas with 3 random points in each one.
    >>> rand_xy = srrasa([652219.,652290.,5772970.,5773040.], strata=4, n=3, plot=False)
    >>> from autostring import astr
    >>> print(astr(rand_xy[0:4,0:2],6,pp=True))
    [['6.522264e+05' '5.772975e+06']
     ['6.522318e+05' '5.772973e+06']
     ['6.522190e+05' '5.772972e+06']
     ['6.522401e+05' '5.772979e+06']]

    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License (see the original file header for the full text).

    Copyright (c) 2012-2013 Arndt Piayda, Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written,  AP, Nov 2012
    Modified, MC, Nov 2012 - default plot=False
              AP, Dec 2012 - documentation change
              MC, Feb 2013 - docstring
              MC, Feb 2013 - ported to Python 3
    """
    # Width and height of a single stratum.
    cell_w = (xy[1] - xy[0]) / strata
    cell_h = (xy[3] - xy[2]) / strata
    xsteps = np.arange(xy[0], xy[1] + cell_w, cell_w)
    ysteps = np.arange(xy[2], xy[3] + cell_h, cell_h)

    # n points per stratum, strata**2 strata in total.
    rand_xy = np.empty((strata**2 * n, 2))

    # Fill strata row by row; x is drawn before y in each stratum so the
    # RNG call order (and hence seeded output) is unchanged.
    pos = 0
    for row in range(strata):
        for col in range(strata):
            rand_xy[pos:pos + n, 0] = (xsteps[col + 1] - xsteps[col]) * np.random.random(n) + xsteps[col]
            rand_xy[pos:pos + n, 1] = (ysteps[row + 1] - ysteps[row]) * np.random.random(n) + ysteps[row]
            pos += n

    # Optionally plot the strata grid and the sampled points.
    if plot:
        import matplotlib as mpl
        import matplotlib.pyplot as plt
        mpl.rc('font', size=20)
        mpl.rc('lines', linewidth=2)
        mpl.rc('axes', linewidth=1.5)
        mpl.rc('xtick.major', width=1.5)
        mpl.rc('ytick.major', width=1.5)
        mpl.rcParams['lines.markersize'] = 6
        grid_color = (166/256., 206/256., 227/256.)
        point_color = (51/256., 160/256., 44/256.)
        fig = plt.figure('stratified random sampling')
        sub = fig.add_subplot(111, aspect='equal')
        sub.set_xlim(xy[0], xy[1])
        sub.set_ylim(xy[2], xy[3])
        for k in range(strata):
            sub.axhline(y=ysteps[k], color=grid_color)
            sub.axvline(x=xsteps[k], color=grid_color)
        sub.scatter(rand_xy[:, 0], rand_xy[:, 1], marker='+', s=60,
                    color=point_color)
        sub.set_xlabel('X')
        sub.set_ylabel('Y')
        sub.set_title('strata = %i, n = %i' % (strata, n))
        sub.xaxis.set_major_formatter(mpl.ticker.
                                      ScalarFormatter(useOffset=False))
        sub.yaxis.set_major_formatter(mpl.ticker.
                                      ScalarFormatter(useOffset=False))
        fig.autofmt_xdate(rotation=45)
        plt.tight_layout(pad=1, h_pad=0, w_pad=0)
        plt.show()

    return rand_xy
def srrasa_trans(xy,strata=5,n=3,num=3,rl=0.5,silent=True,plot=False):
    """
    Generates stratified random 2D transects within a given rectangular
    area.

    Definition
    ----------
    def srrasa_trans(xy,strata=5,n=3,num=3,rl=0.5,silent=True,plot=False):

    Input
    -----
    xy         list of floats (4), list with the x and y coordinates
               enclosing the designated rectangle in the form [x1,x2,y1,y2]

    Optional Input
    --------------
    strata     int, number of strata per axis
    n          int, number of random transects in each strata
    num        int, number of points in each transect
    rl         float [0. to 1.], relative length of transect with respect
               to width of stratum
    silent     bool, if False, runtime diagnostics are printed to the
               console, otherwise not
    plot       bool, if True, stratas and points are plotted,
               otherwise not

    Output
    ------
    rand_xy    ndarray (strata**2*n*num,2), x and y coordinates of the
               stratified random transect points in the given rectangle.

    Examples
    --------
    >>> # seed for reproducible results in doctest
    >>> np.random.seed(1)
    >>> # gives within the rectangle of the given coordinates
    >>> # 16 (4**2) stratas with 3 random transects in each one.
    >>> # Each transect is 0.5*width_of_strata long and contains 5 points logarithmical distributed.
    >>> rand_xy = srrasa_trans([652219.,652290.,5772970.,5773040.], strata=4,
    ...                        n=3, num=5, rl=0.5, silent=True, plot=False)
    >>> from autostring import astr
    >>> print(astr(rand_xy[0:4,0:2],6,pp=True))
    [['6.522264e+05' '5.772983e+06']
     ['6.522276e+05' '5.772983e+06']
     ['6.522292e+05' '5.772983e+06']
     ['6.522315e+05' '5.772983e+06']]

    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT License.

    Copyright (c) 2012-2013 Arndt Piayda, Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written,  AP, Nov 2012
    Modified, AP, Dec 2012 - documentation change
              MC, Feb 2013 - ported to Python 3
    """
    # calculate strata steps
    sw = (xy[1]-xy[0])/strata
    sh = (xy[3]-xy[2])/strata
    xsteps = np.arange(xy[0],xy[1]+sw,sw)
    ysteps = np.arange(xy[2],xy[3]+sh,sh)
    # transect length: fraction rl of the stratum width
    tl = sw*rl

    # make output array: num points per transect, n transects per stratum
    rand_xy = np.empty((strata**2*n*num,2))

    o = 0
    for j in range(strata):
        for i in range(strata):
            for k in range(n):
                # Rejection sampling: draw a random anchor and rotation
                # until the whole transect falls inside the stratum.
                # NOTE(review): there is no iteration cap — this loop can
                # run indefinitely if rl is too large for a stratum.
                goon = True
                it = 0
                while goon:
                    # random seed in strata
                    seedx=(xsteps[i+1]-xsteps[i])*np.random.random(1)+xsteps[i]
                    seedy=(ysteps[j+1]-ysteps[j])*np.random.random(1)+ysteps[j]
                    # make logarithmic transect: num point offsets in
                    # [0, tl], spaced log-denser toward the far end
                    tx  =np.arange(1,num+1)
                    dis =np.sort(tl-np.log(tx)/np.max(np.log(tx))*tl)
                    seedx=np.repeat(seedx,num)+dis
                    seedy=np.repeat(seedy,num)
                    # random angle in strata [deg]
                    angle = 360 * np.random.random(1)
                    # rotate transect to random angle about its first point
                    seedx_trans = (-(seedy-seedy[0])*np.sin(np.deg2rad(angle))+
                                    (seedx-seedx[0])*np.cos(np.deg2rad(angle))+
                                    seedx[0])
                    seedy_trans = ((seedy-seedy[0])*np.cos(np.deg2rad(angle))+
                                   (seedx-seedx[0])*np.sin(np.deg2rad(angle))+
                                   seedy[0])
                    # test if transect is in strata (strict bounds)
                    if (((seedx_trans>xsteps[i]).all()) &
                        ((seedx_trans<xsteps[i+1]).all()) &
                        ((seedy_trans>ysteps[j]).all()) &
                        ((seedy_trans<ysteps[j+1]).all())):
                        goon = False
                    if not silent:
                        # diagnostics: stratum indices and retry count
                        print('strata= (', i, ',', j, ')', ' it= ', it)
                    it += 1
                rand_xy[o:o+num,0] = seedx_trans
                rand_xy[o:o+num,1] = seedy_trans
                o += num

    # plot stratas and random transect points within
    if plot:
        import matplotlib as mpl
        import matplotlib.pyplot as plt
        mpl.rc('font', size=20)
        mpl.rc('lines', linewidth=2)
        mpl.rc('axes', linewidth=1.5)
        mpl.rc('xtick.major', width=1.5)
        mpl.rc('ytick.major', width=1.5)
        mpl.rcParams['lines.markersize']=6
        fig = plt.figure('stratified random transect sampling')
        sub = fig.add_subplot(111, aspect='equal')
        sub.set_xlim(xy[0],xy[1])
        sub.set_ylim(xy[2],xy[3])
        for i in range(strata):
            sub.axhline(y=ysteps[i], color=(166/256., 206/256., 227/256.))
            sub.axvline(x=xsteps[i], color=(166/256., 206/256., 227/256.))
        sub.scatter(rand_xy[:,0],rand_xy[:,1],marker='+', s=60,
                    color=( 51/256., 160/256.,  44/256.))
        sub.set_xlabel('X')
        sub.set_ylabel('Y')
        sub.set_title('strata = %i, n = %i, num = %i' %(strata,n,num))
        sub.xaxis.set_major_formatter(mpl.ticker.
                                      ScalarFormatter(useOffset=False))
        sub.yaxis.set_major_formatter(mpl.ticker.
                                      ScalarFormatter(useOffset=False))
        fig.autofmt_xdate(rotation=45)
        plt.tight_layout(pad=1, h_pad=0, w_pad=0)
        plt.show()

    return rand_xy
if __name__ == '__main__':
    import doctest
    # NORMALIZE_WHITESPACE lets the docstring examples ignore spacing
    # differences in the printed arrays.
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| 39.289809
| 116
| 0.564481
| 1,650
| 12,337
| 4.176364
| 0.213939
| 0.015673
| 0.011319
| 0.005805
| 0.806995
| 0.782325
| 0.771151
| 0.75722
| 0.720795
| 0.716587
| 0
| 0.061672
| 0.320499
| 12,337
| 313
| 117
| 39.415335
| 0.760348
| 0.487801
| 0
| 0.594595
| 1
| 0
| 0.047358
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018018
| false
| 0
| 0.063063
| 0
| 0.099099
| 0.018018
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dae75e638dadae084b5ab4a559caf1fdf3809036
| 107
|
py
|
Python
|
Chapter04/filesdirs_03.py
|
vabyte/Modern-Python-Standard-Library-Cookbook
|
4f53e3ab7b61aca1cca9343e7421e170280cd5b5
|
[
"MIT"
] | 84
|
2018-08-09T09:30:03.000Z
|
2022-01-04T23:20:38.000Z
|
Chapter04/filesdirs_03.py
|
jiro74/Modern-Python-Standard-Library-Cookbook
|
4f53e3ab7b61aca1cca9343e7421e170280cd5b5
|
[
"MIT"
] | 1
|
2019-11-04T18:57:40.000Z
|
2020-09-07T08:52:25.000Z
|
Chapter04/filesdirs_03.py
|
jiro74/Modern-Python-Standard-Library-Cookbook
|
4f53e3ab7b61aca1cca9343e7421e170280cd5b5
|
[
"MIT"
] | 33
|
2018-09-26T11:05:55.000Z
|
2022-03-15T10:31:10.000Z
|
import pathlib

# Working directory as the search root.
cwd = pathlib.Path('.')
# Non-recursive: *.py files directly in the working directory.
print(list(cwd.glob('*.py')))
# Recursive: *.py files at any depth below the working directory.
print(list(cwd.glob('**/*.py')))
| 21.4
| 46
| 0.607477
| 14
| 107
| 4.642857
| 0.5
| 0.276923
| 0.492308
| 0.615385
| 0.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046729
| 107
| 5
| 46
| 21.4
| 0.637255
| 0
| 0
| 0
| 0
| 0
| 0.12037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 9
|
971422f6e39bd9efc9719611ea8dc5fd60e1120b
| 3,078
|
py
|
Python
|
application/service/wallet_pair_response.py
|
singnet/snet-converter-services
|
346b26f8281944a9f47d4bdd1eba54c8fb43e799
|
[
"MIT"
] | null | null | null |
application/service/wallet_pair_response.py
|
singnet/snet-converter-services
|
346b26f8281944a9f47d4bdd1eba54c8fb43e799
|
[
"MIT"
] | 1
|
2022-03-21T04:43:48.000Z
|
2022-03-21T04:43:48.000Z
|
application/service/wallet_pair_response.py
|
singnet/snet-converter-services
|
346b26f8281944a9f47d4bdd1eba54c8fb43e799
|
[
"MIT"
] | 4
|
2021-11-30T04:32:59.000Z
|
2022-03-23T07:20:53.000Z
|
from constants.entity import WalletPairEntities, WalletPairResponseEntities
def get_wallet_pair_by_addresses_response(wallet_pair):
    """Project a wallet-pair record onto the address-lookup response fields."""
    passthrough = (
        WalletPairEntities.TOKEN_PAIR_ID,
        WalletPairEntities.FROM_ADDRESS,
        WalletPairEntities.TO_ADDRESS,
        WalletPairEntities.DEPOSIT_ADDRESS,
        WalletPairEntities.DEPOSIT_ADDRESS_DETAIL,
        WalletPairEntities.SIGNATURE,
        WalletPairEntities.SIGNATURE_EXPIRY,
        WalletPairEntities.UPDATED_AT,
    )
    response = {
        WalletPairEntities.ROW_ID.value: wallet_pair[WalletPairEntities.ROW_ID.value],
        # NOTE(review): ID is populated from ROW_ID here, unlike
        # create_wallet_pair_response which maps ID from ID — confirm
        # this asymmetry is intentional.
        WalletPairEntities.ID.value: wallet_pair[WalletPairEntities.ROW_ID.value],
    }
    response.update({entity.value: wallet_pair[entity.value] for entity in passthrough})
    return response
def create_wallet_pair_response(wallet_pair):
    """Project a newly created wallet-pair record onto its response fields.

    Every response key maps straight from the record under the same
    entity key.
    """
    fields = (
        WalletPairEntities.ROW_ID,
        WalletPairEntities.ID,
        WalletPairEntities.TOKEN_PAIR_ID,
        WalletPairEntities.FROM_ADDRESS,
        WalletPairEntities.TO_ADDRESS,
        WalletPairEntities.DEPOSIT_ADDRESS,
        WalletPairEntities.DEPOSIT_ADDRESS_DETAIL,
        WalletPairEntities.SIGNATURE,
        WalletPairEntities.SIGNATURE_EXPIRY,
        WalletPairEntities.UPDATED_AT,
    )
    return {field.value: wallet_pair[field.value] for field in fields}
def get_wallet_pair_detail_by_deposit_address_response(wallet_pair):
    """Return only the identifying fields of a wallet-pair record."""
    wanted = (
        WalletPairEntities.ROW_ID,
        WalletPairEntities.ID,
        WalletPairEntities.TOKEN_PAIR_ID,
    )
    return {entity.value: wallet_pair[entity.value] for entity in wanted}
def get_wallet_pair_by_conversion_id_response(wallet_pair):
    # Thin alias: identical field projection to the address-lookup response.
    return get_wallet_pair_by_addresses_response(wallet_pair)
def get_all_deposit_address_response(wallet_pairs):
    """Collect the deposit address of every wallet pair into one response."""
    deposit_key = WalletPairEntities.DEPOSIT_ADDRESS.value
    addresses = [pair[deposit_key] for pair in wallet_pairs]
    return {WalletPairResponseEntities.ADDRESSES.value: addresses}
def get_wallets_address_by_ethereum_address_response(address):
    # Wrap the single looked-up address under the CARDANO_ADDRESS key.
    return {
        WalletPairResponseEntities.CARDANO_ADDRESS.value: address
    }
| 54
| 118
| 0.797921
| 325
| 3,078
| 7.196923
| 0.107692
| 0.149637
| 0.153912
| 0.338606
| 0.845233
| 0.830697
| 0.830697
| 0.808465
| 0.780248
| 0.778965
| 0
| 0
| 0.12898
| 3,078
| 56
| 119
| 54.964286
| 0.872436
| 0
| 0
| 0.590909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.022727
| 0.136364
| 0.295455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 8
|
978d40b204a04d26271c83479357a7bc2af0d032
| 3,994
|
py
|
Python
|
internos/etools/migrations/0043_auto_20190515_1341.py
|
UNICEFLebanonInnovation/Staging-Neuro
|
aac1e4f335ff4ec32041f989a9c22f8581a4961a
|
[
"MIT"
] | 1
|
2020-12-12T07:41:11.000Z
|
2020-12-12T07:41:11.000Z
|
internos/etools/migrations/0043_auto_20190515_1341.py
|
UNICEFLebanonInnovation/Staging-Neuro
|
aac1e4f335ff4ec32041f989a9c22f8581a4961a
|
[
"MIT"
] | 9
|
2019-12-31T09:30:23.000Z
|
2022-01-13T00:49:47.000Z
|
internos/etools/migrations/0043_auto_20190515_1341.py
|
UNICEFLebanonInnovation/Staging-Neuro
|
aac1e4f335ff4ec32041f989a9c22f8581a4961a
|
[
"MIT"
] | 1
|
2020-02-03T13:12:55.000Z
|
2020-02-03T13:12:55.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-15 13:41
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add cached partner-organization fields to the etools app.

    Auto-generated by Django 1.11.20 on 2019-05-15; the operations list
    should not be reordered or hand-edited.  Fields fall into three
    groups: long-string array fields (assessments, hact data, ...),
    short CharFields (monetary/summary values stored as text), and two
    DateTimeFields.  All are nullable/blank so existing rows need no
    backfill.
    """

    dependencies = [
        ('etools', '0042_actionpoint_category_name'),
    ]

    operations = [
        migrations.AddField(
            model_name='partnerorganization',
            name='assessments',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=5000), blank=True, null=True, size=None),
        ),
        migrations.AddField(
            model_name='partnerorganization',
            name='core_values_assessment_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='partnerorganization',
            name='core_values_assessments',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=5000), blank=True, null=True, size=None),
        ),
        migrations.AddField(
            model_name='partnerorganization',
            name='flags',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=5000), blank=True, null=True, size=None),
        ),
        migrations.AddField(
            model_name='partnerorganization',
            name='hact_min_requirements',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=5000), blank=True, null=True, size=None),
        ),
        migrations.AddField(
            model_name='partnerorganization',
            name='hact_values',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=5000), blank=True, null=True, size=None),
        ),
        migrations.AddField(
            model_name='partnerorganization',
            name='last_assessment_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='partnerorganization',
            name='net_ct_cy',
            field=models.CharField(blank=True, max_length=250, null=True),
        ),
        migrations.AddField(
            model_name='partnerorganization',
            name='planned_engagement',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=5000), blank=True, null=True, size=None),
        ),
        migrations.AddField(
            model_name='partnerorganization',
            name='planned_visits',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=5000), blank=True, null=True, size=None),
        ),
        migrations.AddField(
            model_name='partnerorganization',
            name='reported_cy',
            field=models.CharField(blank=True, max_length=250, null=True),
        ),
        migrations.AddField(
            model_name='partnerorganization',
            name='staff_members',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=5000), blank=True, null=True, size=None),
        ),
        migrations.AddField(
            model_name='partnerorganization',
            name='total_ct_cp',
            field=models.CharField(blank=True, max_length=250, null=True),
        ),
        migrations.AddField(
            model_name='partnerorganization',
            name='total_ct_cy',
            field=models.CharField(blank=True, max_length=250, null=True),
        ),
        migrations.AddField(
            model_name='partnerorganization',
            name='total_ct_ytd',
            field=models.CharField(blank=True, max_length=250, null=True),
        ),
        migrations.AddField(
            model_name='partnerorganization',
            name='type_of_assessment',
            field=models.CharField(blank=True, max_length=250, null=True),
        ),
    ]
| 41.175258
| 140
| 0.631197
| 404
| 3,994
| 6.066832
| 0.188119
| 0.117503
| 0.150143
| 0.176255
| 0.868625
| 0.868625
| 0.848225
| 0.842513
| 0.831497
| 0.831497
| 0
| 0.024169
| 0.254131
| 3,994
| 96
| 141
| 41.604167
| 0.79859
| 0.017276
| 0
| 0.719101
| 1
| 0
| 0.146609
| 0.025752
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.033708
| 0
| 0.067416
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
97c27789ef8dc80300f339d494117bce96bfb361
| 246
|
py
|
Python
|
Module 3/Chapter 5/ch5_4.py
|
PacktPublishing/Natural-Language-Processing-Python-and-NLTK
|
bb7fd9a3071b4247d13accfbf0a48eefec76e925
|
[
"MIT"
] | 50
|
2016-12-11T13:49:01.000Z
|
2022-03-20T19:47:55.000Z
|
Module 3/Chapter 5/ch5_4.py
|
PacktPublishing/Natural-Language-Processing-Python-and-NLTK
|
bb7fd9a3071b4247d13accfbf0a48eefec76e925
|
[
"MIT"
] | null | null | null |
Module 3/Chapter 5/ch5_4.py
|
PacktPublishing/Natural-Language-Processing-Python-and-NLTK
|
bb7fd9a3071b4247d13accfbf0a48eefec76e925
|
[
"MIT"
] | 40
|
2017-06-14T14:02:48.000Z
|
2021-10-14T06:25:00.000Z
|
import nltk
from nltk.corpus import treebank_chunk

# Inspect the second chunked sentence of the treebank sample corpus.
sentence = treebank_chunk.chunked_sents()[1]
print(sentence.leaves())
print(sentence.pos())
print(sentence.productions())
# Tagged words from the full (unchunked) treebank corpus for comparison.
print(nltk.corpus.treebank.tagged_words())
| 35.142857
| 54
| 0.804878
| 35
| 246
| 5.428571
| 0.428571
| 0.273684
| 0.284211
| 0.394737
| 0.489474
| 0.489474
| 0
| 0
| 0
| 0
| 0
| 0.012712
| 0.04065
| 246
| 6
| 55
| 41
| 0.792373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
8ae82a1dbf777d3c05d4f79cff0dcf50c9f1f384
| 130
|
py
|
Python
|
ad2web/api/utils.py
|
billfor/alarmdecoder-webapp
|
43c3ebb2b44c7291cd89a2a7a31bbdfdb3ec06dc
|
[
"BSD-3-Clause",
"MIT"
] | 46
|
2015-06-14T02:19:16.000Z
|
2022-03-24T03:11:19.000Z
|
ad2web/api/utils.py
|
billfor/alarmdecoder-webapp
|
43c3ebb2b44c7291cd89a2a7a31bbdfdb3ec06dc
|
[
"BSD-3-Clause",
"MIT"
] | 66
|
2015-03-14T16:30:43.000Z
|
2021-08-28T22:20:01.000Z
|
ad2web/api/utils.py
|
billfor/alarmdecoder-webapp
|
43c3ebb2b44c7291cd89a2a7a31bbdfdb3ec06dc
|
[
"BSD-3-Clause",
"MIT"
] | 44
|
2015-02-13T19:23:37.000Z
|
2021-12-30T04:17:21.000Z
|
# -*- coding: utf-8 -*-
import os
import base64
def generate_api_key():
    """Return a random 12-character base32 API key as ``str``.

    Draws 7 bytes (56 bits) of cryptographically secure randomness from
    ``os.urandom`` and strips the base32 ``=`` padding.

    Fix: the original called ``rstrip('==')`` directly on the ``bytes``
    returned by ``base64.b32encode``, which raises ``TypeError`` on
    Python 3; decode to ASCII first, then strip the padding.
    """
    return base64.b32encode(os.urandom(7)).decode('ascii').rstrip('=')
| 16.25
| 55
| 0.661538
| 18
| 130
| 4.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072072
| 0.146154
| 130
| 7
| 56
| 18.571429
| 0.684685
| 0.161538
| 0
| 0
| 1
| 0
| 0.018692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
8aea8a1bfd66e757e3e0bd89fd033db7736fa123
| 1,718
|
py
|
Python
|
tests/test_1889.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_1889.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_1889.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import pytest
"""
Test 1889. Minimum Space Wasted From Packaging
"""
@pytest.fixture(scope="session")
def init_variables_1889():
    # Session-scoped factory fixture: constructs one shared Solution
    # instance and yields a zero-argument callable returning it, so all
    # tests in the session reuse the same object.
    from src.leetcode_1889_minimum_space_wasted_from_packaging import Solution

    solution = Solution()

    def _init_variables_1889():
        return solution

    yield _init_variables_1889
class TestClass1889:
    """Tests for LeetCode 1889: Minimum Space Wasted From Packaging."""

    def test_solution_0(self, init_variables_1889):
        # Packages [2, 3, 5] with box suppliers [[4, 8], [2, 8]] -> waste 6.
        assert init_variables_1889().minWastedSpace([2, 3, 5], [[4, 8], [2, 8]]) == 6

    def test_solution_1(self, init_variables_1889):
        # No supplier can box every package -> -1.
        assert init_variables_1889().minWastedSpace([2, 3, 5], [[1, 4], [2, 3], [3, 4]]) == -1

    def test_solution_2(self, init_variables_1889):
        assert (
            init_variables_1889().minWastedSpace([3, 5, 8, 10, 11, 12], [[12], [11, 9], [10, 5, 14]])
            == 9
        )
#!/usr/bin/env python
import pytest
"""
Test 1889. Minimum Space Wasted From Packaging
"""
# NOTE(review): from here the file repeats its entire content; the
# definitions below are byte-identical duplicates of the ones above and
# silently re-define them on import — likely an accidental paste.
@pytest.fixture(scope="session")
def init_variables_1889():
    # Duplicate of the fixture defined earlier in this file.
    from src.leetcode_1889_minimum_space_wasted_from_packaging import Solution

    solution = Solution()

    def _init_variables_1889():
        return solution

    yield _init_variables_1889
class TestClass1889:
    """Duplicate of the TestClass1889 defined earlier in this file.

    NOTE(review): re-defining the class replaces the previous definition
    at import time — likely an accidental paste.
    """

    def test_solution_0(self, init_variables_1889):
        assert init_variables_1889().minWastedSpace([2, 3, 5], [[4, 8], [2, 8]]) == 6

    def test_solution_1(self, init_variables_1889):
        assert init_variables_1889().minWastedSpace([2, 3, 5], [[1, 4], [2, 3], [3, 4]]) == -1

    def test_solution_2(self, init_variables_1889):
        assert (
            init_variables_1889().minWastedSpace([3, 5, 8, 10, 11, 12], [[12], [11, 9], [10, 5, 14]])
            == 9
        )
| 24.898551
| 101
| 0.648428
| 232
| 1,718
| 4.525862
| 0.181034
| 0.222857
| 0.291429
| 0.12
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0.131657
| 0.213038
| 1,718
| 68
| 102
| 25.264706
| 0.64497
| 0.023283
| 0
| 0.944444
| 0
| 0
| 0.008929
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.277778
| false
| 0
| 0.111111
| 0.055556
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
8af0cc8eab2612c0e200adfd1bbd585565b5fc71
| 5,130
|
py
|
Python
|
misc/python_sealog/cruises.py
|
WHOIGit/ndsf-sealog-server
|
e57843e3e23a924ccf6fc1ef1e40d92f36a3b612
|
[
"MIT"
] | 4
|
2019-10-29T21:53:13.000Z
|
2021-12-02T00:38:42.000Z
|
misc/python_sealog/cruises.py
|
WHOIGit/ndsf-sealog-server
|
e57843e3e23a924ccf6fc1ef1e40d92f36a3b612
|
[
"MIT"
] | 14
|
2020-05-28T16:39:30.000Z
|
2021-05-22T06:01:40.000Z
|
misc/python_sealog/cruises.py
|
WHOIGit/ndsf-sealog-server
|
e57843e3e23a924ccf6fc1ef1e40d92f36a3b612
|
[
"MIT"
] | 1
|
2020-01-31T00:00:42.000Z
|
2020-01-31T00:00:42.000Z
|
#!/usr/bin/env python3
'''
FILE: cruises.py
DESCRIPTION: This script contains the wrapper functions for the sealog-
server cruise routes.
BUGS:
NOTES:
AUTHOR: Webb Pinner
COMPANY: OceanDataTools.org
VERSION: 0.1
CREATED: 2021-01-01
REVISION:
LICENSE INFO: This code is licensed under MIT license (see LICENSE.txt for details)
Copyright (C) OceanDataTools.org 2021
'''
import json
import logging
import requests
from .settings import API_SERVER_URL, HEADERS, CRUISES_API_PATH
def get_cruise(cruise_uid, export_format='json', api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return a cruise record based on the cruise_uid.

    Returns the record as a json object by default; set export_format to 'csv'
    to receive the raw csv text. Returns None on a non-200 response or an
    unrecognized export_format. Request/parse errors are logged and re-raised.
    '''
    try:
        request_url = api_server_url + CRUISES_API_PATH + '/' + cruise_uid + '?format=' + export_format
        response = requests.get(request_url, headers=headers)
        if response.status_code == 200:
            if export_format == 'json':
                return json.loads(response.text)
            if export_format == 'csv':
                return response.text
        return None
    except Exception as error:
        logging.error(str(error))
        raise error
def get_cruises(export_format='json', api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return all cruise records.

    Returns the records as json objects by default; set export_format to 'csv'
    to receive the raw csv text. A 404 (no cruises) yields an empty result in
    the requested format. Request/parse errors are logged and re-raised.
    '''
    try:
        request_url = api_server_url + CRUISES_API_PATH + '?format=' + export_format
        response = requests.get(request_url, headers=headers)
        if response.status_code == 200:
            if export_format == 'json':
                return json.loads(response.text)
            if export_format == 'csv':
                return response.text
        elif response.status_code == 404:
            # No matching cruises: empty result in the requested format.
            if export_format == 'json':
                return []
            if export_format == 'csv':
                return ""
        return None
    except Exception as error:
        logging.error(str(error))
        raise error
def get_cruise_uid_by_id(cruise_id, api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return the UID for a cruise record based on the cruise_id.

    Returns None when the server does not return a matching cruise record.
    Request/parse errors are logged and re-raised.
    '''
    try:
        url = api_server_url + CRUISES_API_PATH + '?cruise_id=' + cruise_id
        req = requests.get(url, headers=headers)
        if req.status_code == 200:
            cruises = json.loads(req.text)
            # Guard against an empty result set: indexing [0] unconditionally
            # raised IndexError instead of falling through to return None.
            if cruises:
                return cruises[0]['id']
    except Exception as error:
        logging.error(str(error))
        raise error
    return None
def get_cruise_by_id(cruise_id, export_format='json', api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return the cruise record based on the cruise_id.

    Returns the record as a json object by default; set export_format to 'csv'
    to receive the raw csv text. Returns None on a non-200 response or an
    unrecognized export_format. Request/parse errors are logged and re-raised.
    '''
    try:
        request_url = api_server_url + CRUISES_API_PATH + '?cruise_id=' + cruise_id + '&format=' + export_format
        response = requests.get(request_url, headers=headers)
        if response.status_code == 200:
            if export_format == 'json':
                # The server answers with a list; the first element is the match.
                return json.loads(response.text)[0]
            if export_format == 'csv':
                return response.text
        return None
    except Exception as error:
        logging.error(str(error))
        raise error
def get_cruise_by_lowering(lowering_uid, export_format='json', api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return the cruise record containing the lowering whose uid is lowering_uid.

    Returns the record as a json object by default; set export_format to 'csv'
    to receive the raw csv text. Returns None on a non-200 response or an
    unrecognized export_format. Request/parse errors are logged and re-raised.
    '''
    try:
        request_url = api_server_url + CRUISES_API_PATH + '/bylowering/' + lowering_uid + '?format=' + export_format
        response = requests.get(request_url, headers=headers)
        if response.status_code == 200:
            if export_format == 'json':
                return json.loads(response.text)
            if export_format == 'csv':
                return response.text
        return None
    except Exception as error:
        logging.error(str(error))
        raise error
def get_cruise_by_event(event_uid, export_format='json', api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return the cruise record containing the event whose uid is event_uid.

    Returns the record as a json object by default; set export_format to 'csv'
    to receive the raw csv text. Returns None on a non-200 response or an
    unrecognized export_format. Request/parse errors are logged and re-raised.
    '''
    try:
        request_url = api_server_url + CRUISES_API_PATH + '/byevent/' + event_uid + '?format=' + export_format
        response = requests.get(request_url, headers=headers)
        if response.status_code == 200:
            if export_format == 'json':
                return json.loads(response.text)
            if export_format == 'csv':
                return response.text
        return None
    except Exception as error:
        logging.error(str(error))
        raise error
| 27.880435
| 111
| 0.619688
| 668
| 5,130
| 4.582335
| 0.149701
| 0.105848
| 0.074485
| 0.058804
| 0.803986
| 0.772297
| 0.772297
| 0.772297
| 0.761843
| 0.742894
| 0
| 0.010494
| 0.294152
| 5,130
| 183
| 112
| 28.032787
| 0.834852
| 0.25692
| 0
| 0.763441
| 0
| 0
| 0.040239
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.043011
| 0
| 0.354839
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c118da10af6c651595c0014c195ea3d26010654b
| 25,942
|
py
|
Python
|
Tic-Tac-Toe/Tic-Tac-Toe.py
|
alexeevivan/pythonic
|
7957421f043205dd7d5f8f5f8579aaa48411bfc8
|
[
"Unlicense"
] | 1
|
2022-03-27T12:59:12.000Z
|
2022-03-27T12:59:12.000Z
|
Tic-Tac-Toe/Tic-Tac-Toe.py
|
alexeevivan/pythonic
|
7957421f043205dd7d5f8f5f8579aaa48411bfc8
|
[
"Unlicense"
] | null | null | null |
Tic-Tac-Toe/Tic-Tac-Toe.py
|
alexeevivan/pythonic
|
7957421f043205dd7d5f8f5f8579aaa48411bfc8
|
[
"Unlicense"
] | null | null | null |
import os
from colorama import init
from colorama import Fore, Back, Style
# добавляем возможность выбора языка
lang_selection = int(input("Для выбора русского языка введите «1»\nTo choose english language press «2»:"))
while lang_selection!=1 and lang_selection!=2:
print(Fore.RED)
print("\nВведён некорректный символ. Попробуйте еще раз.\nAn invalid response was entered. Try again.")
print(Fore.WHITE)
lang_selection = int(input("Для выбора русского языка введите «1»\nTo choose english language press «2»:"))
if lang_selection==1:
# задаём значения всем переменным, которые будут встречаться в коде, при этом не требующие ввода данных от игроков
j = ("\nВас приветствует игра «-Tic-Tac-Toe-» !!! \nКаждая ячейка выделена цифровым индикатором для удобства планирования ходов.")
b2 = """
| | | |
■ | ■ | ■ 1 | 2 | 3
| | | |
— — — — — — — — — — — — — — — — — —
| | | |
■ | ■ | ■ 4 | 5 | 6
| | | |
— — — — — — — — — — — — — — — — — —
| | | |
■ | ■ | ■ 7 | 8 | 9
| | | |
— — — — — — — — — — — — — — — — — —
"""
x = ("X")
o = ("O")
c = ("Отлично! Игрок")
d = ("использует символ X. Осталось совсем чуть-чуть.")
e = ("использует символ O. Пора начинать.")
pl = ("+")
pl_1 = ("/'+'/")
pl_2 = ('/"+"/')
pl_3 = ("«+»")
pl_4 = pl or pl_1 or pl_2 or pl_3
m = ("-")
m_1 = ("/'-'/")
m_2 = ('/"-"/')
m_3 = ("«-»")
m_4 = m or m_1 or m_2 or m_3
print(j)
print(Fore.MAGENTA, b2)
print(Fore.WHITE)
player_1 = str.title(input("Введите имя первого игрока:"))
player_2 = str.title(input("\nВведите имя второго игрока:"))
p_1, p_2 = player_1, player_2
print(Fore.LIGHTCYAN_EX)
print("Приветствую,", player_1, "и", player_2, "!")
# добавление вопроса о предпочтении использования того или иного сивола, поскольку ввод первого имени не будет являтся причиной получения права первого хода
print(Fore.WHITE)
f = int(input("Введите «1», если игрок №1 желает использовать символ Х, или «2» - если будет использовать символ О:"))
print(Fore.LIGHTCYAN_EX)
while f!=1 and f>2:
print("Введён некорректный ответ. Попробуйте ещё раз.")
f = int(input("Введите «1», если игрок №1 желает использовать символ Х, или «2» - если будет использовать символ О:"))
if f==1:
print(c, p_1, d)
if f==2:
print(c, p_1, e)
print(Fore.WHITE)
g = int(input("Введите «1», если игрок №2 использует символ Х, или «2» - если использует символ О:"))
while g==f:
print(Fore.RED)
print("К сожалению, данный символ зарезервирован за игроком", player_1, ".")
print(Fore.WHITE)
g = int(input("Введите «1», если игрок №2 использует символ Х, или «2» - если использует символ О:"))
while g!=1 and g>2:
print(Fore.RED)
print("Введён некорректный ответ. Попробуйте ещё раз.")
print(Fore.WHITE)
g = int(input("Введите «1», если игрок №2 использует символ Х, или «2» - если использует символ О:"))
print(Fore.LIGHTCYAN_EX)
if g==1:
print(c, p_2, d)
if g==2:
print(c, p_2, e)
print(Fore.WHITE)
# создание дополнительной вариативности соврешения ходов
u = input("Как правило, первый ход совершает игрок, чьим символом является «Х». \nВ зависимости от выбранного Вами ответа («+» или «-») я пойму, желаете ли Вы следовать этому правилу:")
# отображение результатов в зависимости от внесённых условий от пользователя
while u!=pl_4 and u!=m_4:
print("Введён некорректный символ. Попробуйте ещё раз:")
u = input("Как правило, первый ход совершает игрок, чьим символом является «Х». \nВ зависимости от выбранного Вами ответа («+» или «-») я пойму, желаете ли Вы следовать этому правилу:")
print(Fore.LIGHTCYAN_EX)
if (u==pl_4 and f==1) or (u!=pl_4 and g==2) or (u!=m_4 and f==1) or (u==m_4 and g==2):
print("Отлично. Игрок", p_1, "использует символ", x,"и начнёт игру первым!")
print("Отлично. Игрок", p_2, "использует символ", o,"и начнёт игру вторым!")
elif (u!=pl_4 and f==1) or (u==pl_4 and g==2) or (u==m_4 and f==1) or (u!=m_4 and g==2):
print("Отлично. Игрок", p_2, "использует символ", o,"и начнёт игру первым!")
print("Отлично. Игрок", p_1, "использует символ", x,"и начнёт игру вторым!")
elif (u==pl_4 and g==1) or (u!=pl_4 and f==2) or (u!=m_4 and g==1) or (u==m_4 and f==2):
print("Отлично. Игрок", p_2, "использует символ", x,"и начнёт игру первым!")
print("Отлично. Игрок", p_1, "использует символ", o,"и начнёт игру вторым!")
elif (u!=pl_4 and g==1) or (u==pl_4 and f==2) or (u==m_4 and g==1) or (u!=m_4 and f==2):
print("Отлично. Игрок", p_1, "использует символ", o,"и начнёт игру первым!")
print("Отлично. Игрок", p_2, "использует символ", x,"и начнёт игру вторым!")
print(Fore.MAGENTA, b2)
print(Fore.WHITE)
# сокращение до переменных выбора игрока
# player_1 == X, player_2 == 0
xx_1 = int((u==pl_4 and f==1) or (u!=pl_4 and g==2) or (u!=m_4 and f==1) or (u==m_4 and g==2))
# player_1==O, player_2==X
xx_2 = int((u!=pl_4 and g==1) or (u==pl_4 and f==2) or (u==m_4 and g==1) or (u!=m_4 and f==2))
# player_2==X, player_1==O
xx_3 = int((u==pl_4 and g==1) or (u!=pl_4 and f==2) or (u!=m_4 and g==1) or (u==m_4 and f==2))
# player_2==O, player_1==X
xx_4 = int((u!=pl_4 and f==1) or (u==pl_4 and g==2) or (u==m_4 and f==1) or (u!=m_4 and g==2))
win_combo = [(1, 2, 3), (4, 5, 6), (7, 8, 9), (1, 4, 7), (2, 5, 8), (3, 6, 9), (1, 5, 9), (3, 5, 7)]
board = list(range(1, 10))
# функция отображения строки, которая будет использована каждый раз после совершения хода
def draw_board():
for i in range(3):
print(" | | ")
print(" ", board[0 + i * 3], " | ", board[1 + i * 3], " | ", board[2 + i * 3])
print(" | | ")
print("— — — — — — — — —")
# функция, отслеживающая ситуацию на игровом поле в зависимости от выбранной ячейки для хода
def take_input(player_token):
while True:
value = input("Введите номер ячейки для совершения хода:")
if not (value in '123456789'):
print("Введён некорректный символ. Попробуйте ещё раз:")
continue
value = int(value)
if str(board[value - 1]) in "XO":
print(Fore.RED)
print("К сожалению, данная клетка уже занята Вашим оппонентом. Выберите пустую ячейку:")
print(Fore.WHITE)
continue
board[value - 1] = player_token
break
# функция проверки условий для победы
def check_win():
for each in win_combo:
if (board[each[0]-1]) == (board[each[1]-1]) == (board[each[2]-1]):
return board[each[1]-1]
else:
return False
def main():
# установка счётчика ходов
counter = 0
while True:
# если предыдущие введённые данные пользователем привели к следующему результату:\
# игрок_1 начнёт игру первым с использованием символа «Х»
if xx_1==True:
os.system('cls' if os.name=='nt' else 'clear')
print(Fore.MAGENTA)
draw_board()
print(Fore.WHITE)
if counter % 2 == 0:
take_input(x)
else:
take_input(o)
if counter > 3:
victory = check_win()
if victory:
print(Fore.GREEN)
draw_board()
if victory==check_win() and victory==x:
print(Fore.BLUE)
print(player_1, "победил(-a)!")
print(Fore.WHITE)
break
if victory==check_win() and victory==o:
print(Fore.BLUE)
print(player_2, "победил(-a)!")
print(Fore.WHITE)
break
counter += 1
if counter > 8:
draw_board()
print(Fore.YELLOW)
print("Ничья!")
print(Fore.WHITE)
break
# если предыдущие введённые данные пользователем привели к следующему результату:\
# игрок_1 начнёт игру первым с использованием символа «О»
elif xx_2==True:
os.system('cls' if os.name=='nt' else 'clear')
print(Fore.MAGENTA)
draw_board()
print(Fore.WHITE)
if counter % 2 == 0:
take_input(o)
else:
take_input(x)
if counter > 3:
victory = check_win()
if victory:
print(Fore.GREEN)
draw_board()
if victory==check_win() and victory==o:
print(Fore.BLUE)
print(player_1, "победил(-a)!")
print(Fore.WHITE)
break
if victory==check_win() and victory==x:
print(Fore.BLUE)
print(player_2, "победил(-a)!")
print(Fore.WHITE)
break
counter += 1
if counter > 8:
draw_board()
print(Fore.YELLOW)
print("Ничья!")
print(Fore.WHITE)
break
# если предыдущие введённые данные пользователем привели к следующему результату:\
# игрок_2 начнёт игру первым с использованием символа «Х»
elif xx_3==True:
os.system('cls' if os.name=='nt' else 'clear')
print(Fore.MAGENTA)
draw_board()
print(Fore.WHITE)
if counter % 2 == 0:
take_input(x)
else:
take_input(o)
if counter > 3:
victory = check_win()
if victory:
print(Fore.GREEN)
draw_board()
if victory==check_win() and victory==x:
print(Fore.BLUE)
print(player_2, "победил(-a)!")
print(Fore.WHITE)
break
if victory==check_win() and victory==o:
print(Fore.BLUE)
print(player_1, "победил(-a)!")
print(Fore.WHITE)
break
counter += 1
if counter > 8:
draw_board()
print(Fore.YELLOW)
print("Ничья!")
print(Fore.WHITE)
break
# если предыдущие введённые данные пользователем привели к следующему результату:\
# игрок_2 начнёт игру первым с использованием символа «О»
elif xx_4==True:
os.system('cls' if os.name=='nt' else 'clear')
print(Fore.MAGENTA)
draw_board()
print(Fore.WHITE)
if counter % 2 == 0:
take_input(o)
else:
take_input(x)
if counter > 3:
victory = check_win()
if victory:
print(Fore.GREEN)
draw_board()
if victory==check_win() and victory==o:
print(Fore.BLUE)
print(player_2, "победил(-а)!")
print(Fore.WHITE)
break
if victory==check_win() and victory==x:
print(Fore.BLUE)
print(player_1, "победил(-а)!")
print(Fore.WHITE)
break
counter += 1
if counter > 8:
draw_board()
print(Fore.YELLOW)
print("Ничья!")
print(Fore.WHITE)
break
main()
elif lang_selection==2:
# задаём значения всем переменным, которые будут встречаться в коде, при этом не требующие ввода данных от игроков
j = ("\nWelcome to the «Tic-Tac-Toe» game!!! \nEach cell is highlighted with a digital indicator for easy planning of moves.")
b2 = """
| | | |
■ | ■ | ■ 1 | 2 | 3
| | | |
— — — — — — — — — — — — — — — — — —
| | | |
■ | ■ | ■ 4 | 5 | 6
| | | |
— — — — — — — — — — — — — — — — — —
| | | |
■ | ■ | ■ 7 | 8 | 9
| | | |
— — — — — — — — — — — — — — — — — —
"""
x = ("X")
o = ("O")
c = ("Greeat! The player")
d = ("uses X symbol. Wait for a moment.")
e = ("uses O symbol. Let's start.")
pl = ("+")
pl_1 = ("/'+'/")
pl_2 = ('/"+"/')
pl_3 = ("«+»")
pl_4 = pl or pl_1 or pl_2 or pl_3
m = ("-")
m_1 = ("/'-'/")
m_2 = ('/"-"/')
m_3 = ("«-»")
m_4 = m or m_1 or m_2 or m_3
print(j)
print(Fore.MAGENTA, b2)
print(Fore.WHITE)
player_1 = str.title(input("Enter the name of the first player:"))
player_2 = str.title(input("\nEnter the name of the second player:"))
p_1, p_2 = player_1, player_2
print(Fore.LIGHTCYAN_EX)
print("Welcome,", player_1, "and", player_2, "!")
# добавление вопроса о предпочтении использования того или иного сивола, поскольку ввод первого имени не будет являтся причиной получения права первого хода
print(Fore.WHITE)
f = int(input("Enter «1» if player # 1 wants to use the X symbol, or «2» - if wants to use the O symbol:"))
print(Fore.LIGHTCYAN_EX)
while f!=1 and f>2:
print("An invalid response was entered. Try again:")
f = int(input("Enter «1» if player # 1 wants to use the X symbol, or «2» - if wants to use the O symbol:"))
if f==1:
print(c, p_1, d)
if f==2:
print(c, p_1, e)
print(Fore.WHITE)
g = int(input("Enter «1» if player # 1 wants to use the X symbol, or «2» - if he/she want to use the O symbol:"))
while g==f:
print(Fore.RED)
print("Unfortunately, this symbol is reserved for the player", player_1, ".")
print(Fore.WHITE)
g = int(input("Enter «1» if player # 1 wants to use the X symbol, or «2» - if he/she want to use the O symbol:"))
while g!=1 and g>2:
print(Fore.RED)
print("An invalid response was entered. Try again:.")
print(Fore.WHITE)
g = int(input("Enter «1» if player # 2 uses the X symbol, or «2» - if he/she uses the O symbol:"))
print(Fore.LIGHTCYAN_EX)
if g==1:
print(c, p_2, d)
if g==2:
print(c, p_2, e)
print(Fore.WHITE)
# создание дополнительной вариативности соврешения ходов
u = input("As a rule, the first move is made by the player whose symbol is «X». \ n depending on the answer you choose («+» or «-»), I will understand if You want to follow this rule:")
# отображение результатов в зависимости от внесённых условий от пользователя
while u!=pl_4 and u!=m_4:
print("An invalid response was entered. Try again:")
u = input("As a rule, the first move is made by the player whose symbol is «X». \ n depending on the answer you choose («+» or «-»), I will understand if You want to follow this rule:")
print(Fore.LIGHTCYAN_EX)
if (u==pl_4 and f==1) or (u!=pl_4 and g==2) or (u!=m_4 and f==1) or (u==m_4 and g==2):
print("Great. The player", p_1, "uses sybmol", x,"and will start the game first!")
print("Great. The player", p_2, "uses sybmol", o,"and will start the game first!")
elif (u!=pl_4 and f==1) or (u==pl_4 and g==2) or (u==m_4 and f==1) or (u!=m_4 and g==2):
print("Great. The player", p_2, "uses sybmol", o,"and will start the game first!")
print("Great. The player", p_1, "uses sybmol", x,"and will start the game first!")
elif (u==pl_4 and g==1) or (u!=pl_4 and f==2) or (u!=m_4 and g==1) or (u==m_4 and f==2):
print("Great. The player", p_2, "uses sybmol", x,"and will start the game first!")
print("Great. The player", p_1, "uses sybmol", o,"and will start the game first!")
elif (u!=pl_4 and g==1) or (u==pl_4 and f==2) or (u==m_4 and g==1) or (u!=m_4 and f==2):
print("Great. The player", p_1, "uses sybmol", o,"and will start the game first!")
print("Great. The player", p_2, "uses sybmol", x,"and will start the game first!")
print(Fore.MAGENTA, b2)
print(Fore.WHITE)
# сокращение до переменных выбора игрока
# player_1 == X, player_2 == 0
xx_1 = int((u==pl_4 and f==1) or (u!=pl_4 and g==2) or (u!=m_4 and f==1) or (u==m_4 and g==2))
# player_1==O, player_2==X
xx_2 = int((u!=pl_4 and g==1) or (u==pl_4 and f==2) or (u==m_4 and g==1) or (u!=m_4 and f==2))
# player_2==X, player_1==O
xx_3 = int((u==pl_4 and g==1) or (u!=pl_4 and f==2) or (u!=m_4 and g==1) or (u==m_4 and f==2))
# player_2==O, player_1==X
xx_4 = int((u!=pl_4 and f==1) or (u==pl_4 and g==2) or (u==m_4 and f==1) or (u!=m_4 and g==2))
win_combo = [(1, 2, 3), (4, 5, 6), (7, 8, 9), (1, 4, 7), (2, 5, 8), (3, 6, 9), (1, 5, 9), (3, 5, 7)]
board = list(range(1, 10))
# функция отображения строки, которая будет использована каждый раз после совершения хода
def draw_board():
for i in range(3):
print(" | | ")
print(" ", board[0 + i * 3], " | ", board[1 + i * 3], " | ", board[2 + i * 3])
print(" | | ")
print("— — — — — — — — —")
# функция, отслеживающая ситуацию на игровом поле в зависимости от выбранной ячейки для хода
def take_input(player_token):
while True:
value = input("Pleaese enter the cell number to make the move:")
if not (value in '123456789'):
print("An invalid response was entered. Try again:")
continue
value = int(value)
if str(board[value - 1]) in "XO":
print(Fore.RED)
print("Unfortunately, this cell is already occupied by your opponent. Select an empty cell:")
print(Fore.WHITE)
continue
board[value - 1] = player_token
break
# функция проверки условий для победы
def check_win():
for each in win_combo:
if (board[each[0]-1]) == (board[each[1]-1]) == (board[each[2]-1]):
return board[each[1]-1]
else:
return False
def main():
# установка счётчика ходов
counter = 0
while True:
# если предыдущие введённые данные пользователем привели к следующему результату:\
# игрок_1 начнёт игру первым с использованием символа «Х»
if xx_1==True:
os.system('cls' if os.name=='nt' else 'clear')
print(Fore.MAGENTA)
draw_board()
print(Fore.WHITE)
if counter % 2 == 0:
take_input(x)
else:
take_input(o)
if counter > 3:
victory = check_win()
if victory:
print(Fore.GREEN)
draw_board()
if victory==check_win() and victory==x:
print(Fore.BLUE)
print(player_1, "won!")
print(Fore.WHITE)
break
if victory==check_win() and victory==o:
print(Fore.BLUE)
print(player_2, "won!")
print(Fore.WHITE)
break
counter += 1
if counter > 8:
draw_board()
print(Fore.YELLOW)
print("Draw!")
print(Fore.WHITE)
break
# если предыдущие введённые данные пользователем привели к следующему результату:\
# игрок_1 начнёт игру первым с использованием символа «О»
elif xx_2==True:
os.system('cls' if os.name=='nt' else 'clear')
print(Fore.MAGENTA)
draw_board()
print(Fore.WHITE)
if counter % 2 == 0:
take_input(o)
else:
take_input(x)
if counter > 3:
victory = check_win()
if victory:
print(Fore.GREEN)
draw_board()
if victory==check_win() and victory==o:
print(Fore.BLUE)
print(player_1, "won!")
print(Fore.WHITE)
break
if victory==check_win() and victory==x:
print(Fore.BLUE)
print(player_2, "won!")
print(Fore.WHITE)
break
counter += 1
if counter > 8:
draw_board()
print(Fore.YELLOW)
print("Draw!")
print(Fore.WHITE)
break
# если предыдущие введённые данные пользователем привели к следующему результату:\
# игрок_2 начнёт игру первым с использованием символа «Х»
elif xx_3==True:
os.system('cls' if os.name=='nt' else 'clear')
print(Fore.MAGENTA)
draw_board()
print(Fore.WHITE)
if counter % 2 == 0:
take_input(x)
else:
take_input(o)
if counter > 3:
victory = check_win()
if victory:
print(Fore.GREEN)
draw_board()
if victory==check_win() and victory==x:
print(Fore.BLUE)
print(player_2, "won!")
print(Fore.WHITE)
break
if victory==check_win() and victory==o:
print(Fore.BLUE)
print(player_1, "won!")
print(Fore.WHITE)
break
counter += 1
if counter > 8:
draw_board()
print(Fore.YELLOW)
print("Draw!")
print(Fore.WHITE)
break
# если предыдущие введённые данные пользователем привели к следующему результату:\
# игрок_2 начнёт игру первым с использованием символа «О»
elif xx_4==True:
os.system('cls' if os.name=='nt' else 'clear')
print(Fore.MAGENTA)
draw_board()
print(Fore.WHITE)
if counter % 2 == 0:
take_input(o)
else:
take_input(x)
if counter > 3:
victory = check_win()
if victory:
print(Fore.GREEN)
draw_board()
if victory==check_win() and victory==o:
print(Fore.BLUE)
print(player_2, "won!")
print(Fore.WHITE)
break
if victory==check_win() and victory==x:
print(Fore.BLUE)
print(player_1, "won!")
print(Fore.WHITE)
break
counter += 1
if counter > 8:
draw_board()
print(Fore.YELLOW)
print("Draw!")
print(Fore.WHITE)
break
main()
| 44.044143
| 194
| 0.457251
| 3,337
| 25,942
| 3.5451
| 0.100989
| 0.019949
| 0.027895
| 0.034489
| 0.922823
| 0.915385
| 0.907101
| 0.888926
| 0.878276
| 0.871513
| 0
| 0.032646
| 0.426143
| 25,942
| 589
| 195
| 44.044143
| 0.744945
| 0.10377
| 0
| 0.922156
| 0
| 0.031936
| 0.245325
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015968
| false
| 0
| 0.005988
| 0
| 0.02994
| 0.361277
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c1233065e68be493898d589af31c528a3cfbe9d7
| 1,155
|
py
|
Python
|
myPuzzleSolvers/puzzle_boards/hashi_puzzle_boards.py
|
giraycoskun/myPuzzleSolvers
|
225286c9a404012953f79094c1769f1392950b8a
|
[
"MIT"
] | null | null | null |
myPuzzleSolvers/puzzle_boards/hashi_puzzle_boards.py
|
giraycoskun/myPuzzleSolvers
|
225286c9a404012953f79094c1769f1392950b8a
|
[
"MIT"
] | null | null | null |
myPuzzleSolvers/puzzle_boards/hashi_puzzle_boards.py
|
giraycoskun/myPuzzleSolvers
|
225286c9a404012953f79094c1769f1392950b8a
|
[
"MIT"
] | null | null | null |
"""Test Maps of Islands for Hashi Puzzle
"""
test1 = [
[
[1, 0],
[0, 1]
],
[
[0, 0, 1],
[0, 0, 0],
[1, 0, 2]
],
[
[0, 1, 0, 2, 0],
[0, 0, 0, 0, 0],
[0, 1, 0, 2, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]
],
[
[0, 3, 0, 2, 0, 0, 3],
[1, 0, 1, 0, 0, 3, 0],
[0, 3, 0, 2, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[4, 0, 2, 0, 0, 0, 2],
[0, 0, 0, 0, 0, 0, 0],
[2, 0, 0, 3, 0, 3, 0]
]
]
test2= [
[
[3, 0, 0, 0, 4, 0, 5, 0, 0, 4],
[0, 0, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 3, 0, 0, 5],
[2, 0, 3, 0, 5, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[3, 0, 0, 2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 3, 0, 0, 2, 0, 0],
[5, 0, 0, 0, 0, 0, 0, 0, 0, 5],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[2, 0, 1, 0, 0, 0, 0, 0, 0, 3]
]
]
test3 = [
[
[0, 2, 0, 0, 2, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 2, 0, 0, 2, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]
]
]
| 18.934426
| 40
| 0.2329
| 233
| 1,155
| 1.154506
| 0.06867
| 0.959108
| 1.092937
| 1.219331
| 0.780669
| 0.736059
| 0.698885
| 0.63197
| 0.583643
| 0.550186
| 0
| 0.388316
| 0.496104
| 1,155
| 60
| 41
| 19.25
| 0.073883
| 0.032035
| 0
| 0.27451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c141f739a33d986f1b2b35a3ae6fdb2d1a8094c0
| 3,960
|
py
|
Python
|
baselines/lstm_classifier.py
|
sorokine/NeuralTripleTranslation
|
9a58a8981ac6ca196668a88e46515951f1a7e5de
|
[
"Apache-2.0"
] | 47
|
2018-07-06T01:00:37.000Z
|
2021-12-05T08:05:35.000Z
|
baselines/lstm_classifier.py
|
sorokine/NeuralTripleTranslation
|
9a58a8981ac6ca196668a88e46515951f1a7e5de
|
[
"Apache-2.0"
] | 6
|
2018-10-29T09:35:58.000Z
|
2022-01-02T14:06:59.000Z
|
baselines/lstm_classifier.py
|
sorokine/NeuralTripleTranslation
|
9a58a8981ac6ca196668a88e46515951f1a7e5de
|
[
"Apache-2.0"
] | 14
|
2018-07-08T06:13:08.000Z
|
2021-06-18T06:21:56.000Z
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
class BiLSTM(nn.Module):
    """LSTM sequence classifier: embedding -> LSTM -> ReLU(fc1) -> dropout -> fc2.

    NOTE(review): despite the name, this model is unidirectional
    (bidirectional=False, num_directions=1). The name is kept so existing
    callers and checkpoints continue to work.
    """

    def __init__(self, embedding_dim, hidden_size,
                 num_layers, vocab_size, num_classes, dropout):
        super(BiLSTM, self).__init__()
        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_directions = 1
        self.lstm = nn.LSTM(input_size=embedding_dim,
                            hidden_size=hidden_size, num_layers=num_layers,
                            batch_first=True, bidirectional=False)
        self.fc1 = nn.Linear(hidden_size * self.num_directions, 64)
        self.fc2 = nn.Linear(64, num_classes)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, inputs, seq_lengths):
        """Classify a batch of token-id sequences.

        Args:
            inputs: LongTensor of shape (batch, seq_len) with token ids.
            seq_lengths: unused; kept for interface compatibility.

        Returns:
            Tensor of shape (batch, num_classes) with unnormalized scores.
        """
        batch_size = inputs.size(0)
        inputs = self.word_embeds(inputs)
        # torch.zeros replaces the deprecated autograd.Variable wrapper, and
        # device=inputs.device fixes the original CPU-pinned initial states
        # that broke forward() when the model was moved to GPU.
        state_shape = (self.num_layers * self.num_directions, batch_size, self.hidden_size)
        h0 = torch.zeros(*state_shape, device=inputs.device)
        c0 = torch.zeros(*state_shape, device=inputs.device)
        outputs, _ = self.lstm(inputs, (h0, c0))
        # Decode from the hidden state of the last time step only.
        outputs = F.relu(self.fc1(outputs[:, -1, :]))
        outputs = self.dropout(outputs)
        outputs = self.fc2(outputs)
        return outputs
class BasicRNN(nn.Module):
    """Vanilla (Elman) RNN sequence classifier: embedding -> RNN -> ReLU(fc1) -> dropout -> fc2."""

    def __init__(self, embedding_dim, hidden_size,
                 num_layers, vocab_size, num_classes, dropout):
        super(BasicRNN, self).__init__()
        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_directions = 1
        self.rnn = nn.RNN(input_size=embedding_dim,
                          hidden_size=hidden_size, num_layers=num_layers,
                          batch_first=True, bidirectional=False)
        self.fc1 = nn.Linear(hidden_size * self.num_directions, 64)
        self.fc2 = nn.Linear(64, num_classes)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, inputs, seq_lengths):
        """Classify a batch of token-id sequences.

        Args:
            inputs: LongTensor of shape (batch, seq_len) with token ids.
            seq_lengths: unused; kept for interface compatibility.

        Returns:
            Tensor of shape (batch, num_classes) with unnormalized scores.
        """
        batch_size = inputs.size(0)
        inputs = self.word_embeds(inputs)
        # torch.zeros on inputs.device replaces the deprecated Variable
        # wrapper and fixes the CPU-pinned initial state on GPU models.
        h0 = torch.zeros(self.num_layers * self.num_directions, batch_size,
                         self.hidden_size, device=inputs.device)
        outputs, _ = self.rnn(inputs, h0)
        # Decode from the hidden state of the last time step only.
        outputs = F.relu(self.fc1(outputs[:, -1, :]))
        outputs = self.dropout(outputs)
        outputs = self.fc2(outputs)
        return outputs
class GRURNN(nn.Module):
    """GRU sequence classifier: embedding -> GRU -> ReLU(fc1) -> dropout -> fc2."""

    def __init__(self, embedding_dim, hidden_size,
                 num_layers, vocab_size, num_classes, dropout):
        super(GRURNN, self).__init__()
        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_directions = 1
        self.rnn = nn.GRU(input_size=embedding_dim,
                          hidden_size=hidden_size, num_layers=num_layers,
                          batch_first=True, bidirectional=False)
        self.fc1 = nn.Linear(hidden_size * self.num_directions, 64)
        self.fc2 = nn.Linear(64, num_classes)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, inputs, seq_lengths):
        """Classify a batch of token-id sequences.

        Args:
            inputs: LongTensor of shape (batch, seq_len) with token ids.
            seq_lengths: unused; kept for interface compatibility.

        Returns:
            Tensor of shape (batch, num_classes) with unnormalized scores.
        """
        batch_size = inputs.size(0)
        inputs = self.word_embeds(inputs)
        # torch.zeros on inputs.device replaces the deprecated Variable
        # wrapper and fixes the CPU-pinned initial state on GPU models.
        h0 = torch.zeros(self.num_layers * self.num_directions, batch_size,
                         self.hidden_size, device=inputs.device)
        outputs, _ = self.rnn(inputs, h0)
        # Decode from the hidden state of the last time step only.
        outputs = F.relu(self.fc1(outputs[:, -1, :]))
        outputs = self.dropout(outputs)
        outputs = self.fc2(outputs)
        return outputs
| 33.559322
| 103
| 0.634091
| 499
| 3,960
| 4.789579
| 0.132265
| 0.09205
| 0.07113
| 0.046862
| 0.932218
| 0.932218
| 0.932218
| 0.932218
| 0.932218
| 0.932218
| 0
| 0.014172
| 0.269444
| 3,960
| 117
| 104
| 33.846154
| 0.81196
| 0.059596
| 0
| 0.797297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.054054
| 0
| 0.216216
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c1818cc08308030103bff4e6be825fe392fc36d4
| 5,618
|
py
|
Python
|
src/cpp/AutoWIG.py
|
StatisKit/Eigen
|
535af80557c472f735fcd0523959a54b7baeb719
|
[
"Apache-2.0"
] | 1
|
2017-07-17T18:50:28.000Z
|
2017-07-17T18:50:28.000Z
|
src/cpp/AutoWIG.py
|
StatisKit/Eigen
|
535af80557c472f735fcd0523959a54b7baeb719
|
[
"Apache-2.0"
] | null | null | null |
src/cpp/AutoWIG.py
|
StatisKit/Eigen
|
535af80557c472f735fcd0523959a54b7baeb719
|
[
"Apache-2.0"
] | 7
|
2017-02-10T10:31:33.000Z
|
2021-03-15T18:30:10.000Z
|
def controller(asg):
    """Post-process the AutoWIG abstract semantic graph for the Eigen wrappers.

    Runs the StatisKit STL controller first, then switches off pybind11
    export for members, free functions and constructors that cannot (or
    should not) be wrapped.

    :param asg: AutoWIG abstract semantic graph.
    :return: the same graph, mutated in place.
    """
    from scons_tools.site_autowig.controller.statiskit_stl import controller as stl_controller
    asg = stl_controller(asg, library=False)
    # for dcl in asg['::Eigen::internal'].declarations(nested=True):
    #     dcl.pybind11_export = False
    # 'trace' clashes across these DenseBase instantiations.
    for cls in ['class ::Eigen::DenseBase< class ::Eigen::Matrix< double, 1, -1, 1, 1, -1 > >',
                'class ::Eigen::DenseBase< class ::Eigen::Matrix< double, -1, -1, 0, -1, -1 > >',
                'class ::Eigen::DenseBase< class ::Eigen::Matrix< double, -1, 1, 0, -1, 1 > >']:
        for mtd in asg[cls].methods(access='public'):
            if mtd.localname == 'trace':
                mtd.pybind11_export = False
    for fct in asg.functions():
        if fct.localname in ['_check_template_params', 'operator()', 'operator[]']:
            fct.pybind11_export = False
    # Only the default constructors of these typedef'd types are exported.
    for cls in ['Vectors', 'RowVectors', 'Matrices']:
        cls = asg['::statiskit::linalg::' + cls].qualified_type.unqualified_type
        for ctr in cls.constructors():
            if ctr.nb_parameters > 0:
                ctr.pybind11_export = False
    # Each (class, prototype) pair below previously had its own copy-pasted
    # loop; disable the first matching member and stop, exactly as before.
    blacklist = [
        ('class ::Eigen::DenseBase< class ::Eigen::Matrix< double, 1, -1, 1, 1, -1 > >',
         'void transposeInPlace()'),
        ('class ::Eigen::MatrixBase< class ::Eigen::Matrix< double, -1, -1, 0, -1, -1 > >',
         'class ::Eigen::TriangularView< class ::Eigen::Matrix< double, -1, -1, 0, -1, -1 >, 10 > triangularView()'),
        ('class ::Eigen::MatrixBase< class ::Eigen::Matrix< double, -1, -1, 0, -1, -1 > >',
         'class ::Eigen::TriangularView< class ::Eigen::Matrix< double, -1, -1, 0, -1, -1 > const, 5 > const triangularView() const'),
        ('class ::Eigen::MatrixBase< class ::Eigen::Matrix< double, -1, -1, 0, -1, -1 > >',
         'class ::Eigen::TriangularView< class ::Eigen::Matrix< double, -1, -1, 0, -1, -1 > const, 2 > const triangularView() const'),
        ('class ::Eigen::PlainObjectBase< class ::Eigen::Matrix< double, -1, -1, 0, -1, -1 > >',
         'void conservativeResize(::Eigen::Index )'),
        ('class ::Eigen::MatrixBase< class ::Eigen::Matrix< double, -1, 1, 0, -1, 1 > >',
         '::Eigen::MatrixBase< class ::Eigen::Matrix< double, -1, 1, 0, -1, 1 > >::RealScalar lpNorm() const'),
        ('class ::Eigen::FullPivLU< class ::Eigen::Matrix< double, -1, -1, 0, -1, -1 > >',
         'void _solve_impl_transposed(class ::Eigen::Matrix< double, -1, 1, 0, -1, 1 > const &, class ::Eigen::Matrix< double, -1, 1, 0, -1, 1 > &) const'),
        ('class ::Eigen::DenseBase< class ::Eigen::Matrix< double, -1, 1, 0, -1, 1 > >',
         'void transposeInPlace()'),
        ('class ::Eigen::PartialPivLU< class ::Eigen::Matrix< double, -1, -1, 0, -1, -1 > >',
         'void _solve_impl_transposed(class ::Eigen::Matrix< double, -1, 1, 0, -1, 1 > const &, class ::Eigen::Matrix< double, -1, 1, 0, -1, 1 > &) const'),
    ]
    for cls, prototype in blacklist:
        for member in asg[cls].methods(access='public') + asg[cls].functions():
            if member.prototype(desugared=False) == prototype:
                member.pybind11_export = False
                break
    return asg
def generator(asg, module, decorator):
    """Generate pybind11 wrappers for the ::statiskit::linalg declarations.

    :param asg: AutoWIG abstract semantic graph.
    :param module: target module for the generated wrappers.
    :param decorator: decorator passed through to the AutoWIG generator.
    :return: the generated wrapper set.
    """
    import autowig
    import itertools
    autowig.generator.plugin = 'pybind11'
    # Start from the typedef'd linalg types, then prepend every inherited
    # base so the generator sees the full class hierarchies.
    nodes = [typedef.qualified_type.unqualified_type
             for typedef in asg['::statiskit::linalg'].typedefs()]
    inherited = list(itertools.chain.from_iterable(
        node.bases(inherited=True) for node in nodes))
    nodes = inherited + nodes + asg['::statiskit::linalg'].declarations()
    wrappers = autowig.generator(asg, nodes, module=module,
                                 decorator=decorator,
                                 closure=False)
    return wrappers
| 83.850746
| 241
| 0.60324
| 700
| 5,618
| 4.795714
| 0.132857
| 0.038129
| 0.138219
| 0.190051
| 0.756032
| 0.737563
| 0.721478
| 0.721478
| 0.721478
| 0.721478
| 0
| 0.040217
| 0.212175
| 5,618
| 67
| 242
| 83.850746
| 0.718256
| 0.016732
| 0
| 0.390625
| 0
| 0.421875
| 0.48352
| 0.053966
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.046875
| 0
| 0.109375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c1dc4afc8b036c449196060524e6492682ac24dd
| 24,802
|
py
|
Python
|
psono/restapi/tests/emergencycode.py
|
dirigeant/psono-server
|
a18c5b3c4d8bbbe4ecf1615b210d99fb77752205
|
[
"Apache-2.0",
"CC0-1.0"
] | 48
|
2018-04-19T15:50:58.000Z
|
2022-01-23T15:58:11.000Z
|
psono/restapi/tests/emergencycode.py
|
dirigeant/psono-server
|
a18c5b3c4d8bbbe4ecf1615b210d99fb77752205
|
[
"Apache-2.0",
"CC0-1.0"
] | 9
|
2018-09-13T14:56:18.000Z
|
2020-01-17T16:44:33.000Z
|
psono/restapi/tests/emergencycode.py
|
dirigeant/psono-server
|
a18c5b3c4d8bbbe4ecf1615b210d99fb77752205
|
[
"Apache-2.0",
"CC0-1.0"
] | 11
|
2019-09-20T11:53:47.000Z
|
2021-07-18T22:41:31.000Z
|
from django.urls import reverse
from django.contrib.auth.hashers import make_password
from django.conf import settings
from rest_framework import status
from .base import APITestCaseExtended
from restapi import models
import random
import string
import binascii
import os
class CreateEmergencyCodeTest(APITestCaseExtended):
    """
    Test to create a emergency code (PUT)
    """
    def setUp(self):
        # Fixed (pre-generated) credentials for the user that creates codes.
        self.test_email = "test@example.com"
        self.test_email_bcrypt = "a"
        self.test_password = "myPassword"
        self.test_authkey = "c55066421a559f76d8ed5227622e9f95a0c67df15220e40d7bc98a8a598124fa15373ac553ef3ee27c7" \
                            "123d6be058e6d43cc71c1b666bdecaf33b734c8583a93"
        self.test_public_key = "5706a5648debec63e86714c8c489f08aee39477487d1b3f39b0bbb05dbd2c649"
        self.test_secret_key = "a7d028388e9d80f2679c236ebb2d0fedc5b7b0a28b393f6a20cc8f6be636aa71"
        self.test_secret_key_enc = "77cde8ff6a5bbead93588fdcd0d6346bb57224b55a49c0f8a22a807bf6414e4d82ff60711422" \
                                   "996e4a26de599982d531eef3098c9a531a05f75878ac0739571d6a242e6bf68c2c28eadf1011" \
                                   "571a48eb"
        self.test_secret_key_nonce = "f580cc9900ce7ae8b6f7d2bab4627e9e689dca0f13a53e3c"
        self.test_private_key = "d636f7cc20384475bdc30c3ede98f719ee09d1fd4709276103772dd9479f353c"
        self.test_private_key_enc = "abddebec9d20cecf7d1cab95ad6c6394db3826856bf21c2c6af9954e9816c2239f5df697e52" \
                                    "d60785eb1136803407b69729c38bb50eefdd2d24f2fa0f104990eee001866ba83704cf4f576" \
                                    "a74b9b2452"
        self.test_private_key_nonce = "4298a9ab3d9d5d8643dfd4445adc30301b565ab650497fb9"
        self.test_user_obj = models.User.objects.create(
            email=self.test_email,
            email_bcrypt=self.test_email_bcrypt,
            authkey=make_password(self.test_authkey),
            public_key=self.test_public_key,
            private_key=self.test_private_key_enc,
            private_key_nonce=self.test_private_key_nonce,
            secret_key=self.test_secret_key_enc,
            secret_key_nonce=self.test_secret_key_nonce,
            user_sauce='90272aaf01a2d525223f192aca069e7f5661b3a0f1b1a91f9b16d493fdf15295',
            is_email_active=True
        )

    def _payload(self, **overrides):
        """Return a valid creation payload, customised per test.

        Pass key=None to drop a field entirely, key=value to replace it.
        """
        data = {
            'description': 'Some Description',
            'activation_delay': 3600,
            'emergency_authkey': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',
            'emergency_data': 'a123',
            'emergency_data_nonce': 'D5BD6D7FCC2E086CFC28B2B2648ECA591D9F8201608A2D173E167D5B27ECA884',
            'emergency_sauce': 'D5BD6D7FCC2E086CFC28B2B2648ECA591D9F8201608A2D173E167D5B27ECA884',
        }
        for key, value in overrides.items():
            if value is None:
                del data[key]
            else:
                data[key] = value
        return data

    def _create(self, data):
        """POST the payload as the test user and return the response."""
        url = reverse('emergencycode')
        self.client.force_authenticate(user=self.test_user_obj)
        return self.client.post(url, data)

    def test_create_success(self):
        """
        Tests to create an emergency code
        """
        response = self._create(self._payload())
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(models.Emergency_Code.objects.count(), 1)

    def test_create_failure_no_description(self):
        """
        Tests to create an emergency code without description
        """
        response = self._create(self._payload(description=None))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_no_activation_delay(self):
        """
        Tests to create an emergency code without activation delay
        """
        response = self._create(self._payload(activation_delay=None))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_no_emergency_authkey(self):
        """
        Tests to create an emergency code without emergency_authkey
        """
        response = self._create(self._payload(emergency_authkey=None))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_no_emergency_data(self):
        """
        Tests to create an emergency code without emergency_data
        """
        response = self._create(self._payload(emergency_data=None))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_no_emergency_data_nonce(self):
        """
        Tests to create an emergency code without emergency_data_nonce
        """
        response = self._create(self._payload(emergency_data_nonce=None))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_no_emergency_sauce(self):
        """
        Tests to create an emergency code without emergency_sauce
        """
        response = self._create(self._payload(emergency_sauce=None))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_emergency_data_no_hex(self):
        """
        Tests to create an emergency code with a emergency_data that is not hex encoded
        """
        # NOTE: the original test also sent activation_delay=-1; preserved
        # to keep behaviour identical.
        response = self._create(self._payload(activation_delay=-1,
                                              emergency_data='a123X'))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_emergency_data_nonce_no_hex(self):
        """
        Tests to create an emergency code with a emergency_data_nonce that is not hex encoded
        """
        # NOTE: the original test also sent activation_delay=-1; preserved
        # to keep behaviour identical.
        response = self._create(self._payload(
            activation_delay=-1,
            emergency_data_nonce='D5BD6D7FCC2E086CFC28B2B2648ECA591D9F8201608A2D173E167D5B27ECA88X'))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_failure_negative_activation_delay(self):
        """
        Tests to create an emergency code with a negative activation delay
        """
        response = self._create(self._payload(activation_delay=-1))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class DeleteEmergencyCodeTest(APITestCaseExtended):
    """
    Test to delete an emergency code (DELETE)
    """
    def setUp(self):
        # First user: fixed (pre-generated) credentials; owns the emergency
        # code deleted in the tests below.
        self.test_email = "test@example.com"
        self.test_email_bcrypt = "a"
        self.test_password = "myPassword"
        self.test_authkey = "c55066421a559f76d8ed5227622e9f95a0c67df15220e40d7bc98a8a598124fa15373ac553ef3ee27c7" \
                            "123d6be058e6d43cc71c1b666bdecaf33b734c8583a93"
        self.test_public_key = "5706a5648debec63e86714c8c489f08aee39477487d1b3f39b0bbb05dbd2c649"
        self.test_secret_key = "a7d028388e9d80f2679c236ebb2d0fedc5b7b0a28b393f6a20cc8f6be636aa71"
        self.test_secret_key_enc = "77cde8ff6a5bbead93588fdcd0d6346bb57224b55a49c0f8a22a807bf6414e4d82ff60711422" \
                                   "996e4a26de599982d531eef3098c9a531a05f75878ac0739571d6a242e6bf68c2c28eadf1011" \
                                   "571a48eb"
        self.test_secret_key_nonce = "f580cc9900ce7ae8b6f7d2bab4627e9e689dca0f13a53e3c"
        self.test_private_key = "d636f7cc20384475bdc30c3ede98f719ee09d1fd4709276103772dd9479f353c"
        self.test_private_key_enc = "abddebec9d20cecf7d1cab95ad6c6394db3826856bf21c2c6af9954e9816c2239f5df697e52" \
                                    "d60785eb1136803407b69729c38bb50eefdd2d24f2fa0f104990eee001866ba83704cf4f576" \
                                    "a74b9b2452"
        self.test_private_key_nonce = "4298a9ab3d9d5d8643dfd4445adc30301b565ab650497fb9"
        self.test_user_obj = models.User.objects.create(
            email=self.test_email,
            email_bcrypt=self.test_email_bcrypt,
            authkey=make_password(self.test_authkey),
            public_key=self.test_public_key,
            private_key=self.test_private_key_enc,
            private_key_nonce=self.test_private_key_nonce,
            secret_key=self.test_secret_key_enc,
            secret_key_nonce=self.test_secret_key_nonce,
            user_sauce='90272aaf01a2d525223f192aca069e7f5661b3a0f1b1a91f9b16d493fdf15295',
            is_email_active=True
        )
        # Second user: randomised credentials; must NOT be able to delete
        # the first user's emergency code.
        self.test_email2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@example.com'
        self.test_email_bcrypt2 = 'a'
        self.test_username2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'
        self.test_authkey2 = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
        self.test_public_key2 = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
        self.test_private_key2 = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
        self.test_private_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_secret_key2 = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
        self.test_secret_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_user_sauce2 = 'a67fef1ff29eb8f866feaccad336fc6311fa4c71bc183b14c8fceff7416add99'
        self.test_user_obj2 = models.User.objects.create(
            username=self.test_username2,
            email=self.test_email2,
            email_bcrypt=self.test_email_bcrypt2,
            authkey=make_password(self.test_authkey2),
            public_key=self.test_public_key2,
            private_key=self.test_private_key2,
            private_key_nonce=self.test_private_key_nonce2,
            secret_key=self.test_secret_key2,
            secret_key_nonce=self.test_secret_key_nonce2,
            user_sauce=self.test_user_sauce2,
            is_email_active=True
        )
        # Emergency code owned by the first user.
        self.test_emergency_code_obj = models.Emergency_Code.objects.create(
            user = self.test_user_obj,
            description = 'Some description',
            activation_delay = 3600,
            emergency_authkey = make_password('abcd'),
            emergency_data = 'a123',
            emergency_data_nonce = 'D5BD6D7FCC2E086CFC28B2B2648ECA591D9F8201608A2D173E167D5B27ECA884',
            emergency_sauce = 'D5BD6D7FCC2E086CFC28B2B2648ECA591D9F8201608A2D173E167D5B27ECA884',
        )
    def test_delete_success(self):
        """
        Tests to delete an emergency code
        """
        url = reverse('emergencycode')
        data = {
            'emergency_code_id': self.test_emergency_code_obj.id,
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.delete(url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_delete_failure_missing_emergency_code_id(self):
        """
        Tests to delete an emergency code
        """
        # Omitting emergency_code_id must be rejected.
        url = reverse('emergencycode')
        data = {
            # 'emergency_code_id': self.test_emergency_code_obj.id,
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.delete(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_delete_failure_belongs_to_other_user(self):
        """
        Tests to delete an emergency code that belongs to another user
        """
        # Authenticated as user 2, targeting user 1's code.
        url = reverse('emergencycode')
        data = {
            'emergency_code_id': self.test_emergency_code_obj.id,
        }
        self.client.force_authenticate(user=self.test_user_obj2)
        response = self.client.delete(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_delete_failure_not_exist(self):
        """
        Tests to delete an emergency code that does not exist
        """
        # Random UUID that matches no stored emergency code.
        url = reverse('emergencycode')
        data = {
            'emergency_code_id': '494d2d69-d4f9-4ab6-8f84-583928add37d',
        }
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.delete(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class ReadEmergencyCodeTest(APITestCaseExtended):
    """
    Test to read an emergency code (GET)
    """
    def setUp(self):
        # First user: fixed (pre-generated) credentials; owns the emergency
        # code read in the tests below.
        self.test_email = "test@example.com"
        self.test_email_bcrypt = "a"
        self.test_password = "myPassword"
        self.test_authkey = "c55066421a559f76d8ed5227622e9f95a0c67df15220e40d7bc98a8a598124fa15373ac553ef3ee27c7" \
                            "123d6be058e6d43cc71c1b666bdecaf33b734c8583a93"
        self.test_public_key = "5706a5648debec63e86714c8c489f08aee39477487d1b3f39b0bbb05dbd2c649"
        self.test_secret_key = "a7d028388e9d80f2679c236ebb2d0fedc5b7b0a28b393f6a20cc8f6be636aa71"
        self.test_secret_key_enc = "77cde8ff6a5bbead93588fdcd0d6346bb57224b55a49c0f8a22a807bf6414e4d82ff60711422" \
                                   "996e4a26de599982d531eef3098c9a531a05f75878ac0739571d6a242e6bf68c2c28eadf1011" \
                                   "571a48eb"
        self.test_secret_key_nonce = "f580cc9900ce7ae8b6f7d2bab4627e9e689dca0f13a53e3c"
        self.test_private_key = "d636f7cc20384475bdc30c3ede98f719ee09d1fd4709276103772dd9479f353c"
        self.test_private_key_enc = "abddebec9d20cecf7d1cab95ad6c6394db3826856bf21c2c6af9954e9816c2239f5df697e52" \
                                    "d60785eb1136803407b69729c38bb50eefdd2d24f2fa0f104990eee001866ba83704cf4f576" \
                                    "a74b9b2452"
        self.test_private_key_nonce = "4298a9ab3d9d5d8643dfd4445adc30301b565ab650497fb9"
        self.test_user_obj = models.User.objects.create(
            email=self.test_email,
            email_bcrypt=self.test_email_bcrypt,
            authkey=make_password(self.test_authkey),
            public_key=self.test_public_key,
            private_key=self.test_private_key_enc,
            private_key_nonce=self.test_private_key_nonce,
            secret_key=self.test_secret_key_enc,
            secret_key_nonce=self.test_secret_key_nonce,
            user_sauce='90272aaf01a2d525223f192aca069e7f5661b3a0f1b1a91f9b16d493fdf15295',
            is_email_active=True
        )
        # Second user: randomised credentials; owns no emergency codes and
        # must therefore read back an empty list.
        self.test_email2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@example.com'
        self.test_email_bcrypt2 = "b"
        self.test_username2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'
        self.test_authkey2 = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
        self.test_public_key2 = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
        self.test_private_key2 = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
        self.test_private_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_secret_key2 = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
        self.test_secret_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
        self.test_user_sauce2 = 'a67fef1ff29eb8f866feaccad336fc6311fa4c71bc183b14c8fceff7416add99'
        self.test_user_obj2 = models.User.objects.create(
            username=self.test_username2,
            email=self.test_email2,
            email_bcrypt=self.test_email_bcrypt2,
            authkey=make_password(self.test_authkey2),
            public_key=self.test_public_key2,
            private_key=self.test_private_key2,
            private_key_nonce=self.test_private_key_nonce2,
            secret_key=self.test_secret_key2,
            secret_key_nonce=self.test_secret_key_nonce2,
            user_sauce=self.test_user_sauce2,
            is_email_active=True
        )
        # Emergency code owned by the first user.
        self.test_emergency_code_obj = models.Emergency_Code.objects.create(
            user = self.test_user_obj,
            description = 'Some description',
            activation_delay = 3600,
            emergency_authkey = make_password('abcd'),
            emergency_data = 'a123',
            emergency_data_nonce = 'D5BD6D7FCC2E086CFC28B2B2648ECA591D9F8201608A2D173E167D5B27ECA884',
            emergency_sauce = 'D5BD6D7FCC2E086CFC28B2B2648ECA591D9F8201608A2D173E167D5B27ECA884',
        )
    def test_read_emergency_codes_success(self):
        """
        Tests to read all emergency_codes
        """
        url = reverse('emergencycode')
        data = {}
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.get(url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # NOTE(review): 'emegency_codes' (sic) presumably matches the key
        # the API actually returns — do not "fix" the spelling here without
        # changing the server response as well.
        self.assertTrue(response.data.get('emegency_codes', False))
        self.assertEqual(len(response.data.get('emegency_codes')), 1)
        emergency_codes = response.data.get('emegency_codes')
        emergency_code = emergency_codes[0]
        self.assertEqual(emergency_code.get('id'), self.test_emergency_code_obj.id)
        self.assertEqual(emergency_code.get('description'), self.test_emergency_code_obj.description)
        self.assertEqual(emergency_code.get('activation_delay'), self.test_emergency_code_obj.activation_delay)
    def test_read_emergency_codes_success_without_permission(self):
        """
        Tests to read all emergency_codes with a user that has no permissions
        """
        url = reverse('emergencycode')
        data = {}
        self.client.force_authenticate(user=self.test_user_obj2)
        response = self.client.get(url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertFalse(response.data.get('emegency_codes', True)) # Empty List
        self.assertEqual(len(response.data.get('emegency_codes')), 0)
class UpdateEmergencyCodeTest(APITestCaseExtended):
    """
    Test to update an emergency code (POST)
    """
    def setUp(self):
        # Fixed (pre-generated) credentials for the authenticated test user.
        self.test_email = "test@example.com"
        self.test_email_bcrypt = "a"
        self.test_password = "myPassword"
        self.test_authkey = "c55066421a559f76d8ed5227622e9f95a0c67df15220e40d7bc98a8a598124fa15373ac553ef3ee27c7" \
                            "123d6be058e6d43cc71c1b666bdecaf33b734c8583a93"
        self.test_public_key = "5706a5648debec63e86714c8c489f08aee39477487d1b3f39b0bbb05dbd2c649"
        self.test_secret_key = "a7d028388e9d80f2679c236ebb2d0fedc5b7b0a28b393f6a20cc8f6be636aa71"
        self.test_secret_key_enc = "77cde8ff6a5bbead93588fdcd0d6346bb57224b55a49c0f8a22a807bf6414e4d82ff60711422" \
                                   "996e4a26de599982d531eef3098c9a531a05f75878ac0739571d6a242e6bf68c2c28eadf1011" \
                                   "571a48eb"
        self.test_secret_key_nonce = "f580cc9900ce7ae8b6f7d2bab4627e9e689dca0f13a53e3c"
        self.test_private_key = "d636f7cc20384475bdc30c3ede98f719ee09d1fd4709276103772dd9479f353c"
        self.test_private_key_enc = "abddebec9d20cecf7d1cab95ad6c6394db3826856bf21c2c6af9954e9816c2239f5df697e52" \
                                    "d60785eb1136803407b69729c38bb50eefdd2d24f2fa0f104990eee001866ba83704cf4f576" \
                                    "a74b9b2452"
        self.test_private_key_nonce = "4298a9ab3d9d5d8643dfd4445adc30301b565ab650497fb9"
        self.test_user_obj = models.User.objects.create(
            email=self.test_email,
            email_bcrypt=self.test_email_bcrypt,
            authkey=make_password(self.test_authkey),
            public_key=self.test_public_key,
            private_key=self.test_private_key_enc,
            private_key_nonce=self.test_private_key_nonce,
            secret_key=self.test_secret_key_enc,
            secret_key_nonce=self.test_secret_key_nonce,
            user_sauce='90272aaf01a2d525223f192aca069e7f5661b3a0f1b1a91f9b16d493fdf15295',
            is_email_active=True
        )
    def test_put_emergencycode(self):
        """
        Tests to update an emergency code
        """
        # PUT is not supported on this endpoint: the server must answer 405.
        url = reverse('emergencycode')
        data = {}
        self.client.force_authenticate(user=self.test_user_obj)
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
| 42.984402
| 115
| 0.695992
| 2,183
| 24,802
| 7.584975
| 0.078333
| 0.071989
| 0.021017
| 0.024641
| 0.934835
| 0.916898
| 0.913456
| 0.904215
| 0.8896
| 0.874804
| 0
| 0.189666
| 0.227482
| 24,802
| 576
| 116
| 43.059028
| 0.67453
| 0.061164
| 0
| 0.811671
| 0
| 0
| 0.304622
| 0.23115
| 0
| 0
| 0
| 0
| 0.066313
| 1
| 0.055703
| false
| 0.034483
| 0.026525
| 0
| 0.092838
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c1f66cb021f358b7d383b4de73482a2243b6478d
| 681
|
py
|
Python
|
fragmenstein/victor/_victor_overridables.py
|
matteoferla/Fragmenstein
|
151bde01f4ebd930880cb7ad234bab68ac4a3e76
|
[
"MIT"
] | 41
|
2020-04-09T14:11:39.000Z
|
2022-03-15T15:44:14.000Z
|
fragmenstein/victor/_victor_overridables.py
|
LaYeqa/Fragmenstein
|
151bde01f4ebd930880cb7ad234bab68ac4a3e76
|
[
"MIT"
] | 13
|
2020-12-02T13:13:59.000Z
|
2022-01-14T11:29:46.000Z
|
fragmenstein/victor/_victor_overridables.py
|
LaYeqa/Fragmenstein
|
151bde01f4ebd930880cb7ad234bab68ac4a3e76
|
[
"MIT"
] | 6
|
2020-09-07T10:47:51.000Z
|
2021-09-23T14:22:39.000Z
|
from ._victor_plonk import _VictorPlonk
class _VictorOverridables(_VictorPlonk):
    """No-op lifecycle hooks intended to make inherited modifications easier.

    Subclasses customise behaviour by overriding any of these methods;
    each one does nothing by default.
    """

    def post_params_step(self):
        """Override in subclasses to run custom logic at this point.

        :return: None
        """
        pass

    def post_monster_step(self):
        """Override in subclasses to run custom logic at this point.

        :return: None
        """
        pass

    def pose_mod_step(self):
        """Override in subclasses to run custom logic at this point.

        :return: None
        """
        pass

    def post_igor_step(self):
        """Override in subclasses to run custom logic at this point.

        :return: None
        """
        pass
| 21.967742
| 63
| 0.562408
| 72
| 681
| 5.138889
| 0.375
| 0.086486
| 0.12973
| 0.194595
| 0.737838
| 0.737838
| 0.737838
| 0.737838
| 0.737838
| 0.737838
| 0
| 0
| 0.361233
| 681
| 31
| 64
| 21.967742
| 0.850575
| 0.380323
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.4
| 0.1
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 9
|
a9b8cfbdb5b63265f995b002e7b55f83688c23dc
| 784
|
py
|
Python
|
za/udp/test3.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
za/udp/test3.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
za/udp/test3.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
#%%
# Raw hex dumps of captured network frames, one byte per two hex characters,
# space separated (note each string keeps a trailing space, so split(' ')
# below yields a final empty-string element).
# NOTE(review): the leading bytes look like an Ethernet header with
# EtherType 08 00 (IPv4) followed by an IP header starting 45 00 —
# presumably UDP captures given the file name; confirm against the capture.
ss ='00 80 e1 13 4f 24 04 0e 3c 90 f5 58 08 00 45 00 00 23 a1 c2 00 00 80 11 15 83 c0 a8 01 17 c0 a8 01 1d 17 71 17 71 00 0f f5 6d 31 36 62 62 62 62 62 00 00 00 00 00 00 00 00 00 00 00 '
ss2='00 80 e1 13 4f 24 04 0e 3c 90 f5 58 08 00 45 00 00 23 a1 c6 00 00 80 11 15 7f c0 a8 01 17 c0 a8 01 1d 17 71 17 71 00 0f 17 33 a9 4c 29 10 00 01 00 00 00 00 00 01 20 45 4d 45 42 46 '
ss3='ff ff ff ff ff ff 04 0e 3c 90 f5 58 08 00 45 00 00 60 cf 57 00 00 80 11 e6 ce c0 a8 01 17 c0 a8 01 ff 00 89 00 89 00 4c 17 33 a9 4c 29 10 00 01 00 00 00 00 00 01 20 45 4d 45 42 46 41 46 45 45 50 46 41 43 4e 44 43 46 42 44 46 46 44 46 41 44 43 45 4b 46 43 41 41 00 00 20 00 01 c0 0c 00 20 00 01 00 04 93 e0 00 06 00 00 c0 a8 01 17 '
# Split the first frame into per-byte strings and look up the offset of the
# first '0f' byte (interactive notebook-cell inspection).
dd = ss.split(' ')
dd.index('0f')
# %%
# Scratch arithmetic cells (values echoed by the notebook runner).
0xf8+40
# %%
256+32
# %%
| 52.266667
| 336
| 0.633929
| 243
| 784
| 2.045267
| 0.283951
| 0.209256
| 0.181087
| 0.193159
| 0.607646
| 0.54326
| 0.54326
| 0.515091
| 0.515091
| 0.515091
| 0
| 0.758945
| 0.322704
| 784
| 14
| 337
| 56
| 0.177024
| 0.014031
| 0
| 0
| 0
| 0.428571
| 0.90117
| 0
| 0
| 1
| 0.005202
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a9f23570f22f0c9d8958a77a80b33c890b5c28d9
| 2,515
|
py
|
Python
|
tests/endpoints/test_cash_register.py
|
PJUllrich/Universal-Bunq-API-Python-Wrapper
|
9e1e0d1477d502c07fb9f31095e55b945b47b062
|
[
"MIT"
] | null | null | null |
tests/endpoints/test_cash_register.py
|
PJUllrich/Universal-Bunq-API-Python-Wrapper
|
9e1e0d1477d502c07fb9f31095e55b945b47b062
|
[
"MIT"
] | null | null | null |
tests/endpoints/test_cash_register.py
|
PJUllrich/Universal-Bunq-API-Python-Wrapper
|
9e1e0d1477d502c07fb9f31095e55b945b47b062
|
[
"MIT"
] | null | null | null |
from apiwrapper.endpoints.cash_register import CashRegister
from tests.endpoints.test_endpoint import EndpointTest
class CashRegisterTest(EndpointTest):
    """Endpoint-URL construction tests for the CashRegister wrapper."""

    __base_endpoint_url = "/user/%d/monetary-account/%d/cash-register"

    @property
    def _base_endpoint(self):
        # Fill the user-id and monetary-account-id slots with random ids.
        return self.__base_endpoint_url % (self.random_id, self.random_id)

    def setUp(self):
        super().setUp(CashRegister)

    def test_get_base_endpoint(self):
        expected = self._base_endpoint
        actual = self.test_class._get_base_endpoint(
            self.random_id, self.random_id)
        self.assert_parameters(expected, actual)

    def test_get_all_cash_registers_for_account(self):
        expected = self._base_endpoint
        actual = self.test_class.get_all_cash_registers_for_account(
            self.random_id, self.random_id)
        self.assert_parameters(expected, actual)

    def test_get_cash_register_by_id(self):
        expected = self._base_endpoint + "/%d" % self.random_id
        actual = self.test_class.get_cash_register_by_id(
            self.random_id, self.random_id, self.random_id)
        self.assert_parameters(expected, actual)

    def test_get_all_qr_codes_for_cash_register(self):
        expected = self._base_endpoint + "/%d/qr-code" % self.random_id
        actual = self.test_class.get_all_qr_codes_for_cash_register(
            self.random_id, self.random_id, self.random_id)
        self.assert_parameters(expected, actual)

    def test_get_qr_code_by_id(self):
        expected = self._base_endpoint + "/%d/qr-code/%d" % (self.random_id, self.random_id)
        actual = self.test_class.get_qr_code_by_id(
            self.random_id, self.random_id, self.random_id, self.random_id)
        self.assert_parameters(expected, actual)

    def test_get_content_for_qr_code(self):
        expected = self._base_endpoint + "/%d/qr-code/%d/content" % (
            self.random_id,
            self.random_id
        )
        actual = self.test_class.get_content_for_qr_code(
            self.random_id, self.random_id, self.random_id, self.random_id)
        self.assert_parameters(expected, actual)
| 36.449275
| 81
| 0.726839
| 352
| 2,515
| 4.696023
| 0.119318
| 0.15729
| 0.188748
| 0.203267
| 0.813067
| 0.813067
| 0.757411
| 0.729583
| 0.699335
| 0.699335
| 0
| 0
| 0.192445
| 2,515
| 68
| 82
| 36.985294
| 0.813885
| 0
| 0
| 0.391304
| 0
| 0
| 0.036581
| 0.025447
| 0
| 0
| 0
| 0
| 0.130435
| 1
| 0.173913
| false
| 0
| 0.043478
| 0.021739
| 0.282609
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e77d068de050af8d157b873cf3f9e8adf3026289
| 33,055
|
py
|
Python
|
Proyecto/Proyecto.py
|
rinicro/Machine-Learning-and-Big-Data
|
b35899c13202d2102a0f093ad2f023a9802b754d
|
[
"MIT"
] | null | null | null |
Proyecto/Proyecto.py
|
rinicro/Machine-Learning-and-Big-Data
|
b35899c13202d2102a0f093ad2f023a9802b754d
|
[
"MIT"
] | null | null | null |
Proyecto/Proyecto.py
|
rinicro/Machine-Learning-and-Big-Data
|
b35899c13202d2102a0f093ad2f023a9802b754d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Proyecto de la asignatura Aprendizaje Automático y Big Data
Rubén Ruperto Díaz y Rafael Herrera Troca
'''
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.model_selection import train_test_split
from scipy.optimize import fmin_tnc, minimize
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
os.chdir("./resources")
#%% Funciones auxiliares
# Logistic sigmoid function
def sigmoide(z):
    """Return 1 / (1 + e^(-z)); works elementwise on NumPy arrays."""
    denominator = 1 + np.exp(-z)
    return 1 / denominator
# Derivative of the sigmoid, expressed in terms of its output value
def diffSigmoide(a):
    """Given a = sigmoide(z), return the derivative a * (1 - a)."""
    complement = 1 - a
    return a * complement
## Logistic-regression helpers (exercise 2):
# Cost function
def P1coste(theta, X, Y, reg=0):
    """Regularized logistic-regression cost for weights theta on (X, Y).

    NOTE(review): the penalty sums theta**2 over ALL entries, including the
    bias theta[0], while the companion gradient excludes it — kept as-is.
    """
    h = sigmoide(np.dot(X, theta))
    m = len(Y)
    # Log-likelihood accumulated over every sample.
    log_likelihood = np.dot(np.log(h).T, Y) + np.dot(np.log(1 - h).T, 1 - Y)
    penalty = reg * np.sum(theta ** 2) / (2 * m)
    return -log_likelihood / m + penalty
# Gradient of the cost function
def P1gradiente(theta, X, Y, reg=0):
    """Gradient of P1coste; the bias weight theta[0] is not regularized."""
    h = sigmoide(np.dot(X, theta))
    m = len(Y)
    # Copy of theta with the bias entry zeroed so it escapes the penalty.
    theta_reg = np.concatenate(([0], theta[1:]))
    return np.dot(X.T, h - Y) / m + reg * theta_reg / m
# Percentage of predictions that agree with the true labels
def P1porc_ac(X, Y, theta):
    """Accuracy (%) of the 0.5-thresholded logistic prediction against Y."""
    predictions = sigmoide(np.dot(X, theta)) >= 0.5
    hits = [(predictions & (Y == 1)) | (~predictions & (Y == 0))]
    return np.count_nonzero(hits) / len(Y) * 100
## Neural-network helpers (exercise 4):
# Random weight matrix of the requested layer dimensions
def P2randomWeights(l_in, l_out):
    """Return an (l_out, l_in+1) matrix of weights uniform in [-eps, eps).

    eps = sqrt(6) / sqrt(l_in + l_out); the +1 column is for the bias.
    """
    eps = np.sqrt(6) / np.sqrt(l_in + l_out)
    uniform01 = np.random.random((l_out, l_in + 1))
    return uniform01 * (2 * eps) - eps
# Apply one layer of the network: weights 'theta' on input 'X'
def P2applyLayer(X, theta):
    """Return sigmoide(X . theta^T), the output of a single layer."""
    return sigmoide(np.dot(X, theta.T))
# Forward-propagate input 'X' through all weight matrices in 'theta'
def P2applyNet(X, theta):
    """Return (output, activations).

    activations[i] is the bias-extended input that was fed to layer i;
    output is the final network response.
    """
    activations = []
    layer = X.copy()
    for weights in theta:
        # Prepend the bias column of ones before applying the layer.
        bias = np.ones((len(layer), 1))
        layer = np.hstack((bias, layer))
        activations.append(layer.copy())
        layer = P2applyLayer(layer, weights)
    return layer, activations
# Cost of the network: expected output 'y', network response 'h_theta',
# list of weight matrices 'theta' and regularization strength 'reg'
def P2coste(y, h_theta, theta, reg):
    """Cross-entropy cost plus L2 penalty (bias columns excluded)."""
    m = len(y)
    cross_entropy = np.sum(-y * np.log(h_theta) - (1 - y) * np.log(1 - h_theta))
    # Penalize every weight except the bias column (index 0) of each layer.
    penalty = sum(np.sum(w[:, 1:] ** 2) for w in theta)
    return cross_entropy / m + reg * penalty / (2 * m)
# Compute the gradient of the cost function by backpropagation, given
# the expected output 'y', the bias-extended input of each layer 'a',
# the network output 'h_theta', the list of weight matrices 'theta'
# and the regularization term 'reg'.
def P2gradiente(y, a, h_theta, theta, reg):
    """Return the list of weight-matrix gradients, one per layer."""
    d = h_theta - y  # error at the output layer
    delta = [np.dot(d.T, a[-1]) / len(y)]  # gradient of the last layer
    # Propagate the error backwards through the hidden layers.
    for i in range(len(theta)-1,0,-1):
        d = np.dot(d, theta[i]) * diffSigmoide(a[i])
        d = d[:,1:]  # discard the error component of the bias unit
        delta.insert(0, np.dot(d.T, a[i-1]) / len(y))
    # Add the regularization term; the bias column (index 0) is unpenalized.
    for i in range(len(delta)):
        delta[i][:,1:] += reg * theta[i][:,1:] / len(y)
    return delta
# Compute and return the cost and gradient of a neural network given
# all the weights flattened into the array 'params_rn', the size of
# each layer in 'capas', the input data 'X', the expected output 'Y'
# and the regularization term 'reg'.
def P2backprop(params_rn, capas, X, Y, reg):
    """Objective for scipy.optimize.minimize: returns (cost, flat gradient)."""
    # Unflatten the weight vector into one matrix per layer; the matrix
    # feeding layer i+1 has shape (capas[i+1], capas[i] + 1), the extra
    # column being the bias.
    theta = [np.reshape(params_rn[:capas[1]*(capas[0]+1)],
                        (capas[1],capas[0]+1))]
    gastados = capas[1]*(capas[0]+1)  # entries of params_rn consumed so far
    for i in range(len(capas)-2):
        theta.append(np.reshape(params_rn[gastados:gastados+capas[i+2]*
                     (capas[i+1]+1)],(capas[i+2],capas[i+1]+1)))
        gastados += capas[i+2]*(capas[i+1]+1)
    # Forward pass, then cost and backward pass.
    h_theta,a = P2applyNet(X, theta)
    cost = P2coste(Y, h_theta, theta, reg)
    grad = P2gradiente(Y, a, h_theta, theta, reg)
    # Flatten the per-layer gradients back into a single vector.
    g = np.array([])
    for i in range(len(grad)):
        g = np.concatenate((g, grad[i].ravel()))
    return cost, g
# Accuracy of the network response 'res' against the true labels 'Y'
def P2porc_ac(res, Y):
    """Percentage of rows where the 0.5-thresholded output matches Y."""
    predicted = res >= 0.5
    hits = [(predicted & (Y == 1)) | (~predicted & (Y == 0))]
    return np.count_nonzero(hits) / len(Y) * 100
#%% Data loading and preparation
np.random.seed(27)
data = pd.read_csv('mushrooms.csv')
# Encode the target ('p' -> 0, 'e' -> 1) and one-hot encode every
# categorical feature column.
Y = data['class'].replace({'p':0, 'e':1})
X = pd.get_dummies(data.drop('class', axis=1))
# Split the data into training, validation and test sets
# (0.2 test, then 0.25 of the remainder -> a 60/20/20 split, stratified).
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.2,
        random_state=0, shuffle=True, stratify=Y)
Xtrain, Xval, Ytrain, Yval = train_test_split(Xtrain, Ytrain,
        test_size=0.25, random_state=0, shuffle=True, stratify=Ytrain)
# Prepend a bias column of ones for the regression/network inputs and
# build column-vector versions of the label series.
Xtrain2 = np.hstack((np.array([np.ones(len(Ytrain))]).T, Xtrain))
Xval2 = np.hstack((np.array([np.ones(len(Yval))]).T, Xval))
Xtest2 = np.hstack((np.array([np.ones(len(Ytest))]).T, Xtest))
Ytrain2 = np.array([Ytrain]).T
Yval2 = np.array([Yval]).T
Ytest2 = np.array([Ytest]).T
# Representamos un histograma para cada variable según la distribución de
# champiñones venenosos y comestibles para cada posible valor
for name in data.columns[1:]:
plt.figure(figsize=(10,10))
plt.title("Número de venenosos y comestibles según " + name)
values = data[name].value_counts().axes[0].to_list()
cuentaP = []
cuentaE = []
for v in values:
cuentaP.append(len(data[(data[name]==v) & (data['class']=='p')]))
cuentaE.append(len(data[(data[name]==v) & (data['class']=='e')]))
plt.bar(np.arange(len(values)), cuentaP, 0.4, color='darkorchid')
plt.bar(np.arange(len(values))+0.4, cuentaE, 0.4, color='greenyellow')
plt.ylabel('Número de casos')
plt.xlabel(name)
plt.xticks(np.arange(len(values))+0.2, values)
plt.savefig("var" + name + ".pdf", format='pdf')
plt.show()
#%% Parte 1: Regresión logística
# Entrenamos la regresión con distintos valores para el término de
# regularización
theta0 = np.zeros(np.shape(Xtrain2)[1])
regValues = range(-10, 4)
thetas = []
errorTrain = []
acTrain = []
errorVal = []
acVal = []
for reg in regValues:
theta = fmin_tnc(func=P1coste, x0=theta0, fprime=P1gradiente,
args=(Xtrain2, Ytrain, 10**reg))[0]
thetas.append(theta)
errorTrain.append(P1coste(theta, Xtrain2, Ytrain))
acTrain.append(P1porc_ac(Xtrain2, Ytrain, theta))
errorVal.append(P1coste(theta, Xval2, Yval))
acVal.append(P1porc_ac(Xval2, Yval, theta))
# Comprobamos el error y el pocentaje de acierto según el término de
# regularización
opt = np.argmin(errorVal)
print('El valor óptimo del parámetro de regularización es',
10**regValues[opt])
plt.figure(figsize=(10,10))
plt.plot(regValues, acTrain, 'r', label="Entrenamiento")
plt.plot(regValues, acVal, 'b', label="Validación")
plt.title(r"Porcentaje de acierto según $\lambda$")
plt.xlabel(r"Valor de $\lambda = 10^x$")
plt.ylabel("Porcentaje de acierto")
plt.legend(loc="lower left")
plt.savefig("aciertoLogistica.pdf", format='pdf')
plt.show()
plt.figure(figsize=(10,10))
plt.plot(regValues, errorTrain, 'r', label="Entrenamiento")
plt.plot(regValues, errorVal, 'b', label="Validación")
plt.title(r"Error según $\lambda$")
plt.xlabel(r"Valor de $\lambda = 10^x$")
plt.ylabel("Error")
plt.legend(loc="upper left")
plt.savefig("errorLogistica.pdf", format='pdf')
plt.show()
# Calculamos el porcentaje de acierto sobre los datos de test para el
# valor escogido del término de regularización
ac = P1porc_ac(Xtest2, Ytest, thetas[opt])
print('El porcentaje de acierto sobre los datos de test es', ac, '%')
#%% Parte 2: Redes neurales
# Creamos unas matrices inciales con pesos aleatorios
size2 = 25
theta01 = P2randomWeights(np.shape(Xtrain)[1], size2)
theta02 = P2randomWeights(size2, 1)
theta0 = np.concatenate((theta01.ravel(), theta02.ravel()))
regValues = range(-6, 4)
itera = range(10, 110, 10)
errorTrain = np.zeros((len(regValues), len(itera)))
acTrain = np.zeros((len(regValues), len(itera)))
errorVal = np.zeros((len(regValues), len(itera)))
acVal = np.zeros((len(regValues), len(itera)))
for i in range(len(regValues)):
for j in range(len(itera)):
theta = minimize(fun=P2backprop, x0=theta0,
args=((np.shape(Xtrain)[1],size2,1), Xtrain, Ytrain2,
10**regValues[i]), method='TNC', jac=True,
options={'maxiter':itera[j]})['x']
theta1 = np.reshape(theta[:size2*(np.shape(Xtrain)[1]+1)],
(size2,np.shape(Xtrain)[1]+1))
theta2 = np.reshape(theta[size2*(np.shape(Xtrain)[1]+1):],
(1,size2+1))
resTrain = P2applyNet(Xtrain, (theta1, theta2))[0]
acTrain[i][j] = P2porc_ac(resTrain, Ytrain2)
resVal = P2applyNet(Xval, (theta1, theta2))[0]
acVal[i][j] = P2porc_ac(resVal, Yval2)
errorTrain[i][j] = P2coste(Ytrain2, resTrain, [theta1,theta2], 0)
errorVal[i][j] = P2coste(Yval2, resVal, [theta1,theta2], 0)
# Comprobamos el error y el pocentaje de acierto según el término de
# regularización y el número de iteraciones
opt = np.argmin(errorVal)
optReg, optItera = 10**regValues[opt//len(itera)], itera[opt%len(itera)]
print('El valor óptimo del parámetro de regularización es', optReg)
print('El valor óptimo para el número de iteraciones es', optItera)
xLabels = [str(it) for it in itera]
yLabels = [r'$10^{' + str(r) + '}$' for r in regValues]
plt.figure(figsize=(10,10))
plt.title(r"Porcentaje de aciertos según el valor de $\lambda$ y" +
" el número de iteraciones")
plt.ylabel(r'$\lambda$')
plt.xlabel('Iteraciones')
fig = plt.subplot()
im = fig.imshow(acVal, cmap=cm.viridis)
cax = make_axes_locatable(fig).append_axes("right", size="5%", pad=0.2)
plt.colorbar(im, cax=cax)
for i in range(len(xLabels)):
for j in range(len(yLabels)):
text = fig.text(i, j, round(acVal[j][i],2), ha="center",
va="center", color=("k" if acVal[j][i] > 93 else "w"))
fig.set_xticks(np.arange(len(xLabels)))
fig.set_yticks(np.arange(len(yLabels)))
fig.set_xticklabels(xLabels)
fig.set_yticklabels(yLabels)
plt.savefig("aciertoValNeuronal.pdf", format='pdf')
plt.show()
plt.figure(figsize=(10,10))
plt.title(r"Porcentaje de aciertos según el valor de $\lambda$ y " +
"el número de iteraciones")
plt.ylabel(r'$\lambda$')
plt.xlabel('Iteraciones')
fig = plt.subplot()
im = fig.imshow(acTrain, cmap=cm.viridis)
cax = make_axes_locatable(fig).append_axes("right", size="5%", pad=0.2)
plt.colorbar(im, cax=cax)
for i in range(len(xLabels)):
for j in range(len(yLabels)):
text = fig.text(i, j, round(acTrain[j][i],2), ha="center",
va="center", color=("k" if acTrain[j][i] > 93 else "w"))
fig.set_xticks(np.arange(len(xLabels)))
fig.set_yticks(np.arange(len(yLabels)))
fig.set_xticklabels(xLabels)
fig.set_yticklabels(yLabels)
plt.savefig("aciertoTrainNeuronal.pdf", format='pdf')
plt.show()
plt.figure(figsize=(10,10))
plt.title(r"Error según el valor de $\lambda$ y el número de iteraciones")
plt.ylabel(r'$\lambda$')
plt.xlabel('Iteraciones')
fig = plt.subplot()
im = fig.imshow(np.log10(errorVal), cmap=cm.viridis_r)
cax = make_axes_locatable(fig).append_axes("right", size="5%", pad=0.2)
plt.colorbar(im, cax=cax)
for i in range(len(xLabels)):
for j in range(len(yLabels)):
text = fig.text(i, j, round(np.log10(errorVal[j][i]),3),
ha="center", va="center",
color=("k" if np.log10(errorVal[j][i]) < -5 else "w"))
fig.set_xticks(np.arange(len(xLabels)))
fig.set_yticks(np.arange(len(yLabels)))
fig.set_xticklabels(xLabels)
fig.set_yticklabels(yLabels)
plt.savefig("errorValNeuronal.pdf", format='pdf')
plt.show()
plt.figure(figsize=(10,10))
plt.title(r"Error según el valor de $\lambda$ y el número de iteraciones")
plt.ylabel(r'$\lambda$')
plt.xlabel('Iteraciones')
fig = plt.subplot()
im = fig.imshow(np.log10(errorTrain), cmap=cm.viridis_r)
cax = make_axes_locatable(fig).append_axes("right", size="5%", pad=0.2)
plt.colorbar(im, cax=cax)
for i in range(len(xLabels)):
for j in range(len(yLabels)):
text = fig.text(i, j, round(np.log10(errorTrain[j][i]),3),
ha="center", va="center",
color=("k" if np.log10(errorTrain[j][i]) < -5 else "w"))
fig.set_xticks(np.arange(len(xLabels)))
fig.set_yticks(np.arange(len(yLabels)))
fig.set_xticklabels(xLabels)
fig.set_yticklabels(yLabels)
plt.savefig("errorTrainNeuronal.pdf", format='pdf')
plt.show()
# Calculamos el porcentaje de acierto sobre los datos de test para el
# valor escogido del término de regularización y de iteraciones
theta = minimize(fun=P2backprop, x0=theta0,
args=((np.shape(Xtrain)[1],size2,1), Xtrain, Ytrain2,
optReg), method='TNC', jac=True, options={'maxiter':optItera})['x']
theta1 = np.reshape(theta[:size2*(np.shape(Xtrain)[1]+1)],
(size2,np.shape(Xtrain)[1]+1))
theta2 = np.reshape(theta[size2*(np.shape(Xtrain)[1]+1):],(1,size2+1))
resTest = P2applyNet(Xtest, (theta1, theta2))[0]
ac = P2porc_ac(resTest, Ytest2)
print('El porcentaje de acierto sobre los datos de test es', ac, '%')
#%% Parte 3: Máquinas de soporte vectorial
# Comenzamos usando kernel lineal y distintos valores de C
parValues = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30, 100, 300]
acTrain = []
acVal = []
for C in parValues:
svm = SVC(kernel='linear', C=C)
svm.fit(Xtrain,Ytrain)
acTrain.append(svm.score(Xtrain,Ytrain) * 100)
acVal.append(svm.score(Xval,Yval) * 100)
# Comprobamos el porcentaje de acierto según el valor de C
opt = np.argmax(acVal)
print('El valor óptimo de C es', parValues[opt])
plt.figure(figsize=(10,10))
plt.plot(range(len(parValues)), acTrain, 'r', label="Entrenamiento")
plt.plot(range(len(parValues)), acVal, 'b', label="Validación")
plt.title(r"Porcentaje de acierto según $C$")
plt.xlabel(r"Valor de $C$")
plt.xticks(range(len(parValues)), parValues)
plt.ylabel("Porcentaje de acierto")
plt.legend(loc="lower left")
plt.savefig("aciertoSVM.pdf", format='pdf')
plt.show()
# Calculamos el porcentaje de acierto sobre los datos de test para el
# valor escogido de C
svm = SVC(kernel='linear', C=parValues[opt])
svm.fit(Xtrain,Ytrain)
ac = svm.score(Xtest,Ytest) * 100
print('El porcentaje de acierto sobre los datos de test es', ac, '%')
# Probamos ahora con kernel gaussiano utilizando distintos valores de C
# y de sigma
acTrain = np.zeros((len(regValues), len(regValues)))
acVal = np.zeros((len(regValues), len(regValues)))
for i in range(len(parValues)):
for j in range(len(parValues)):
svm = SVC(kernel='rbf', C=parValues[i],
gamma=1/(2*parValues[j]**2))
svm.fit(Xtrain,Ytrain)
acTrain[i][j] = svm.score(Xtrain,Ytrain) * 100
acVal[i][j] = svm.score(Xval,Yval) * 100
# Comprobamos el pocentaje de acierto según los valores de C y sigma
opt = np.argmax(acVal)
optC, optSigma = parValues[opt//len(parValues)], parValues[opt%len(parValues)]
print('El valor óptimo del parámetro C es', optC)
print('El valor óptimo para el parámetro sigma es', optSigma)
plt.figure(figsize=(10,10))
plt.title(r"Porcentaje de aciertos según los valores de $\sigma$ y C")
plt.ylabel('$C$')
plt.xlabel(r'$\sigma$')
fig = plt.subplot()
im = fig.imshow(acTrain, cmap=cm.viridis)
cax = make_axes_locatable(fig).append_axes("right", size="5%", pad=0.2)
plt.colorbar(im, cax=cax)
for i in range(len(parValues)):
for j in range(len(parValues)):
text = fig.text(i, j, round(acTrain[j][i],2), ha="center",
va="center", color=("k" if acTrain[j][i] > 70 else "w"))
fig.set_xticks(np.arange(len(parValues)))
fig.set_yticks(np.arange(len(parValues)))
fig.set_xticklabels(parValues)
fig.set_yticklabels(parValues)
plt.savefig("aciertoTrainSVM.pdf", format='pdf')
plt.show()
plt.figure(figsize=(10,10))
plt.title(r"Porcentaje de aciertos según los valores de $\sigma$ y C")
plt.ylabel('$C$')
plt.xlabel(r'$\sigma$')
fig = plt.subplot()
im = fig.imshow(acVal, cmap=cm.viridis)
cax = make_axes_locatable(fig).append_axes("right", size="5%", pad=0.2)
plt.colorbar(im, cax=cax)
for i in range(len(parValues)):
for j in range(len(parValues)):
text = fig.text(i, j, round(acVal[j][i],2), ha="center",
va="center", color=("k" if acVal[j][i] > 70 else "w"))
fig.set_xticks(np.arange(len(parValues)))
fig.set_yticks(np.arange(len(parValues)))
fig.set_xticklabels(parValues)
fig.set_yticklabels(parValues)
plt.savefig("aciertoValSVM.pdf", format='pdf')
plt.show()
# Calculamos el porcentaje de acierto sobre los datos de test para el
# valor escogido de C y sigma
svm = SVC(kernel='rbf', C=optC, gamma=1/(2*optSigma**2))
svm.fit(Xtrain,Ytrain)
ac = svm.score(Xtest,Ytest) * 100
print('El porcentaje de acierto sobre los datos de test es', ac, '%')
#%% Parte 4: K-Medias
# Entrenamos un K-Medias con 2 clusters
kmeans = KMeans(n_clusters=2, random_state=0).fit(Xtrain)
trainLabels = kmeans.labels_
valLabels = kmeans.predict(Xval)
# Comprobamos el pocentaje de acierto según la interpretación de las etiquetas
acTrainA = np.count_nonzero(trainLabels == Ytrain) / len(Ytrain) * 100
acTrainB = np.count_nonzero(trainLabels != Ytrain) / len(Ytrain) * 100
acValA = np.count_nonzero(valLabels == Yval) / len(Yval) * 100
acValB = np.count_nonzero(valLabels != Yval) / len(Yval) * 100
print('Interpretando las etiquetas de forma directa, el entrenamiento ' +
'obtiene un porcentaje de acierto del ', acTrainA, '%')
print('Interpretando las etiquetas de forma inversa, el entrenamiento ' +
'obtiene un porcentaje de acierto del ', acTrainB, '%')
print('Interpretando las etiquetas de forma directa, la validación ' +
'obtiene un porcentaje de acierto del ', acValA, '%')
print('Interpretando las etiquetas de forma inversa, la validación ' +
'obtiene un porcentaje de acierto del ', acValB, '%')
# Calculamos el porcentaje de acierto sobre los datos de test para la
# interpretación escogida de las etiquetas
testLabels = kmeans.predict(Xtest)
ac = (np.count_nonzero(testLabels == Ytest) if acValA > acValB
else np.count_nonzero(testLabels != Ytest)) / len(Ytest) * 100
print('El porcentaje de acierto sobre los datos de test es', ac, '%')
#%% Parte 5: Reducción de dimensionalidad
# Aplicamos PCA y comprobamos la varianza explicada para cada componente
pca = PCA()
XtrainR = pca.fit_transform(Xtrain)
expVar = pca.explained_variance_ratio_
expVarAcum = [expVar[0]]
for i in range(1, len(expVar)):
expVarAcum.append(expVar[i] + expVarAcum[-1])
print("La varianza explicada acumulada es:", np.array(expVarAcum) * 100)
#%% Alternativa: Eliminar la variable 'olor' correlacionada
print("A partir de este punto repetimos los experimentos eliminando la " +
"variable 'odor', ya que está fuertemente correlacionada con la " +
"variable objetivo")
# Preparamos un conjunto adicional de datos eliminando la variable 'odor'
odor_lab = []
for c in Xtrain.columns:
if c[:4] == 'odor':
odor_lab.append(c)
Wtrain = Xtrain.copy().drop(odor_lab, 1)
Wval = Xval.copy().drop(odor_lab, 1)
Wtest = Xtest.copy().drop(odor_lab, 1)
Wtrain2 = np.hstack((np.array([np.ones(len(Ytrain))]).T, Wtrain))
Wval2 = np.hstack((np.array([np.ones(len(Yval))]).T, Wval))
Wtest2 = np.hstack((np.array([np.ones(len(Ytest))]).T, Wtest))
#%% Parte 1b: Regresión logística
# Entrenamos la regresión con distintos valores para el término de
# regularización
theta0 = np.zeros(np.shape(Wtrain2)[1])
regValues = range(-10, 4)
thetas = []
errorTrain = []
acTrain = []
errorVal = []
acVal = []
for reg in regValues:
theta = fmin_tnc(func=P1coste, x0=theta0, fprime=P1gradiente,
args=(Wtrain2, Ytrain, 10**reg))[0]
thetas.append(theta)
errorTrain.append(P1coste(theta, Wtrain2, Ytrain))
acTrain.append(P1porc_ac(Wtrain2, Ytrain, theta))
errorVal.append(P1coste(theta, Wval2, Yval))
acVal.append(P1porc_ac(Wval2, Yval, theta))
# Comprobamos el error y el pocentaje de acierto según el término de
# regularización
opt = np.argmin(errorVal)
print('El valor óptimo del parámetro de regularización es',
10**regValues[opt])
plt.figure(figsize=(10,10))
plt.plot(regValues, acTrain, 'r', label="Entrenamiento")
plt.plot(regValues, acVal, 'b', label="Validación")
plt.title(r"Porcentaje de acierto según $\lambda$")
plt.xlabel(r"Valor de $\lambda = 10^x$")
plt.ylabel("Porcentaje de acierto")
plt.legend(loc="lower left")
plt.savefig("aciertoLogisticaNO.pdf", format='pdf')
plt.show()
plt.figure(figsize=(10,10))
plt.plot(regValues, errorTrain, 'r', label="Entrenamiento")
plt.plot(regValues, errorVal, 'b', label="Validación")
plt.title(r"Error según $\lambda$")
plt.xlabel(r"Valor de $\lambda = 10^x$")
plt.ylabel("Error")
plt.legend(loc="upper left")
plt.savefig("errorLogisticaNO.pdf", format='pdf')
plt.show()
# Calculamos el porcentaje de acierto sobre los datos de test para el
# valor escogido del término de regularización
ac = P1porc_ac(Wtest2, Ytest, thetas[opt])
print('El porcentaje de acierto sobre los datos de test es', ac, '%')
#%% Parte 2b: Redes neuronales
# Creamos unas matrices inciales con pesos aleatorios
size2 = 25
theta01 = P2randomWeights(np.shape(Wtrain)[1], size2)
theta02 = P2randomWeights(size2, 1)
theta0 = np.concatenate((theta01.ravel(), theta02.ravel()))
regValues = range(-6, 4)
itera = range(10, 110, 10)
errorTrain = np.zeros((len(regValues), len(itera)))
acTrain = np.zeros((len(regValues), len(itera)))
errorVal = np.zeros((len(regValues), len(itera)))
acVal = np.zeros((len(regValues), len(itera)))
for i in range(len(regValues)):
for j in range(len(itera)):
theta = minimize(fun=P2backprop, x0=theta0,
args=((np.shape(Wtrain)[1],size2,1), Wtrain, Ytrain2,
10**regValues[i]), method='TNC', jac=True,
options={'maxiter':itera[j]})['x']
theta1 = np.reshape(theta[:size2*(np.shape(Wtrain)[1]+1)],
(size2,np.shape(Wtrain)[1]+1))
theta2 = np.reshape(theta[size2*(np.shape(Wtrain)[1]+1):],
(1,size2+1))
resTrain = P2applyNet(Wtrain, (theta1, theta2))[0]
acTrain[i][j] = P2porc_ac(resTrain, Ytrain2)
resVal = P2applyNet(Wval, (theta1, theta2))[0]
acVal[i][j] = P2porc_ac(resVal, Yval2)
errorTrain[i][j] = P2coste(Ytrain2, resTrain, [theta1,theta2], 0)
errorVal[i][j] = P2coste(Yval2, resVal, [theta1,theta2], 0)
# Comprobamos el error y el pocentaje de acierto según el término de
# regularización y el número de iteraciones
opt = np.argmin(errorVal)
optReg, optItera = 10**regValues[opt//len(itera)], itera[opt%len(itera)]
print('El valor óptimo del parámetro de regularización es', optReg)
print('El valor óptimo para el número de iteraciones es', optItera)
xLabels = [str(it) for it in itera]
yLabels = [r'$10^{' + str(r) + '}$' for r in regValues]
plt.figure(figsize=(10,10))
plt.title(r"Porcentaje de aciertos según el valor de $\lambda$ y el " +
"número de iteraciones")
plt.ylabel(r'$\lambda$')
plt.xlabel('Iteraciones')
fig = plt.subplot()
im = fig.imshow(acVal, cmap=cm.viridis)
cax = make_axes_locatable(fig).append_axes("right", size="5%", pad=0.2)
plt.colorbar(im, cax=cax)
for i in range(len(xLabels)):
for j in range(len(yLabels)):
text = fig.text(i, j, round(acVal[j][i],2), ha="center",
va="center", color=("k" if acVal[j][i] > 93 else "w"))
fig.set_xticks(np.arange(len(xLabels)))
fig.set_yticks(np.arange(len(yLabels)))
fig.set_xticklabels(xLabels)
fig.set_yticklabels(yLabels)
plt.savefig("aciertoValNeuronalNO.pdf", format='pdf')
plt.show()
plt.figure(figsize=(10,10))
plt.title(r"Porcentaje de aciertos según el valor de $\lambda$ y el " +
"número de iteraciones")
plt.ylabel(r'$\lambda$')
plt.xlabel('Iteraciones')
fig = plt.subplot()
im = fig.imshow(acTrain, cmap=cm.viridis)
cax = make_axes_locatable(fig).append_axes("right", size="5%", pad=0.2)
plt.colorbar(im, cax=cax)
for i in range(len(xLabels)):
for j in range(len(yLabels)):
text = fig.text(i, j, round(acTrain[j][i],2), ha="center",
va="center", color=("k" if acTrain[j][i] > 93 else "w"))
fig.set_xticks(np.arange(len(xLabels)))
fig.set_yticks(np.arange(len(yLabels)))
fig.set_xticklabels(xLabels)
fig.set_yticklabels(yLabels)
plt.savefig("aciertoTrainNeuronalNO.pdf", format='pdf')
plt.show()
plt.figure(figsize=(10,10))
plt.title(r"Error según el valor de $\lambda$ y el número de iteraciones")
plt.ylabel(r'$\lambda$')
plt.xlabel('Iteraciones')
fig = plt.subplot()
im = fig.imshow(np.log10(errorVal), cmap=cm.viridis_r)
cax = make_axes_locatable(fig).append_axes("right", size="5%", pad=0.2)
plt.colorbar(im, cax=cax)
for i in range(len(xLabels)):
for j in range(len(yLabels)):
text = fig.text(i, j, round(np.log10(errorVal[j][i]),3),
ha="center", va="center",
color=("k" if np.log10(errorVal[j][i]) < -5 else "w"))
fig.set_xticks(np.arange(len(xLabels)))
fig.set_yticks(np.arange(len(yLabels)))
fig.set_xticklabels(xLabels)
fig.set_yticklabels(yLabels)
plt.savefig("errorValNeuronalNO.pdf", format='pdf')
plt.show()
plt.figure(figsize=(10,10))
plt.title(r"Error según el valor de $\lambda$ y el número de iteraciones")
plt.ylabel(r'$\lambda$')
plt.xlabel('Iteraciones')
fig = plt.subplot()
im = fig.imshow(np.log10(errorTrain), cmap=cm.viridis_r)
cax = make_axes_locatable(fig).append_axes("right", size="5%", pad=0.2)
plt.colorbar(im, cax=cax)
for i in range(len(xLabels)):
for j in range(len(yLabels)):
text = fig.text(i, j, round(np.log10(errorTrain[j][i]),3),
ha="center", va="center",
color=("k" if np.log10(errorTrain[j][i]) < -5 else "w"))
fig.set_xticks(np.arange(len(xLabels)))
fig.set_yticks(np.arange(len(yLabels)))
fig.set_xticklabels(xLabels)
fig.set_yticklabels(yLabels)
plt.savefig("errorTrainNeuronalNO.pdf", format='pdf')
plt.show()
# Calculamos el porcentaje de acierto sobre los datos de test para el
# valor escogido del término de regularización y de iteraciones
theta = minimize(fun=P2backprop, x0=theta0,
args=((np.shape(Wtrain)[1],size2,1), Wtrain, Ytrain2, optReg),
method='TNC', jac=True, options={'maxiter':optItera})['x']
theta1 = np.reshape(theta[:size2*(np.shape(Wtrain)[1]+1)],
(size2,np.shape(Wtrain)[1]+1))
theta2 = np.reshape(theta[size2*(np.shape(Wtrain)[1]+1):],(1,size2+1))
resTest = P2applyNet(Wtest, (theta1, theta2))[0]
ac = P2porc_ac(resTest, Ytest2)
print('El porcentaje de acierto sobre los datos de test es', ac, '%')
#%% Parte 3b: Máquinas de soporte vectorial
# Comenzamos usando kernel lineal y distintos valores de C
parValues = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30, 100, 300]
acTrain = []
acVal = []
for C in parValues:
svm = SVC(kernel='linear', C=C)
svm.fit(Wtrain,Ytrain)
acTrain.append(svm.score(Wtrain,Ytrain) * 100)
acVal.append(svm.score(Wval,Yval) * 100)
# Comprobamos el porcentaje de acierto según el valor de C
opt = np.argmax(acVal)
print('El valor óptimo de C es', parValues[opt])
plt.figure(figsize=(10,10))
plt.plot(range(len(parValues)), acTrain, 'r', label="Entrenamiento")
plt.plot(range(len(parValues)), acVal, 'b', label="Validación")
plt.title(r"Porcentaje de acierto según $C$")
plt.xlabel(r"Valor de $C$")
plt.xticks(range(len(parValues)), parValues)
plt.ylabel("Porcentaje de acierto")
plt.legend(loc="lower left")
plt.savefig("aciertoSVMNO.pdf", format='pdf')
plt.show()
# Calculamos el porcentaje de acierto sobre los datos de test para el
# valor escogido de C
svm = SVC(kernel='linear', C=parValues[opt])
svm.fit(Wtrain,Ytrain)
ac = svm.score(Wtest,Ytest) * 100
print('El porcentaje de acierto sobre los datos de test es', ac, '%')
# Probamos ahora con kernel gaussiano utilizando distintos valores de C
# y de sigma
acTrain = np.zeros((len(regValues), len(regValues)))
acVal = np.zeros((len(regValues), len(regValues)))
for i in range(len(parValues)):
for j in range(len(parValues)):
svm = SVC(kernel='rbf', C=parValues[i],
gamma=1/(2*parValues[j]**2))
svm.fit(Wtrain,Ytrain)
acTrain[i][j] = svm.score(Wtrain,Ytrain) * 100
acVal[i][j] = svm.score(Wval,Yval) * 100
# Comprobamos el pocentaje de acierto según los valores de C y sigma
opt = np.argmax(acVal)
optC, optSigma = parValues[opt//len(parValues)], parValues[opt%len(parValues)]
print('El valor óptimo del parámetro C es', optC)
print('El valor óptimo para el parámetro sigma es', optSigma)
plt.figure(figsize=(10,10))
plt.title(r"Porcentaje de aciertos según los valores de $\sigma$ y C")
plt.ylabel('$C$')
plt.xlabel(r'$\sigma$')
fig = plt.subplot()
im = fig.imshow(acTrain, cmap=cm.viridis)
cax = make_axes_locatable(fig).append_axes("right", size="5%", pad=0.2)
plt.colorbar(im, cax=cax)
for i in range(len(parValues)):
for j in range(len(parValues)):
text = fig.text(i, j, round(acTrain[j][i],2), ha="center",
va="center", color=("k" if acTrain[j][i] > 70 else "w"))
fig.set_xticks(np.arange(len(parValues)))
fig.set_yticks(np.arange(len(parValues)))
fig.set_xticklabels(parValues)
fig.set_yticklabels(parValues)
plt.savefig("aciertoTrainSVMNO.pdf", format='pdf')
plt.show()
plt.figure(figsize=(10,10))
plt.title(r"Porcentaje de aciertos según los valores de $\sigma$ y C")
plt.ylabel('$C$')
plt.xlabel(r'$\sigma$')
fig = plt.subplot()
im = fig.imshow(acVal, cmap=cm.viridis)
cax = make_axes_locatable(fig).append_axes("right", size="5%", pad=0.2)
plt.colorbar(im, cax=cax)
for i in range(len(parValues)):
for j in range(len(parValues)):
text = fig.text(i, j, round(acVal[j][i],2), ha="center",
va="center", color=("k" if acVal[j][i] > 70 else "w"))
fig.set_xticks(np.arange(len(parValues)))
fig.set_yticks(np.arange(len(parValues)))
fig.set_xticklabels(parValues)
fig.set_yticklabels(parValues)
plt.savefig("aciertoValSVMNO.pdf", format='pdf')
plt.show()
# Calculamos el porcentaje de acierto sobre los datos de test para el
# valor escogido de C y sigma
svm = SVC(kernel='rbf', C=optC, gamma=1/(2*optSigma**2))
svm.fit(Wtrain,Ytrain)
ac = svm.score(Wtest,Ytest) * 100
print('El porcentaje de acierto sobre los datos de test es', ac, '%')
#%% Parte 4b: K-Medias
# Entrenamos un K-Medias con 2 clusters
kmeans = KMeans(n_clusters=2, random_state=0).fit(Wtrain)
trainLabels = kmeans.labels_
valLabels = kmeans.predict(Wval)
# Comprobamos el pocentaje de acierto según la interpretación de las etiquetas
acTrainA = np.count_nonzero(trainLabels == Ytrain) / len(Ytrain) * 100
acTrainB = np.count_nonzero(trainLabels != Ytrain) / len(Ytrain) * 100
acValA = np.count_nonzero(valLabels == Yval) / len(Yval) * 100
acValB = np.count_nonzero(valLabels != Yval) / len(Yval) * 100
print('Interpretando las etiquetas de forma directa, el entrenamiento' +
' obtiene un porcentaje de acierto del ', acTrainA, '%')
print('Interpretando las etiquetas de forma inversa, el entrenamiento' +
' obtiene un porcentaje de acierto del ', acTrainB, '%')
print('Interpretando las etiquetas de forma directa, la validación ' +
'obtiene un porcentaje de acierto del ', acValA, '%')
print('Interpretando las etiquetas de forma inversa, la validación ' +
'obtiene un porcentaje de acierto del ', acValB, '%')
# Calculamos el porcentaje de acierto sobre los datos de test para la
# interpretación escogida de las etiquetas
testLabels = kmeans.predict(Wtest)
ac = (np.count_nonzero(testLabels == Ytest) if acValA > acValB
else np.count_nonzero(testLabels != Ytest)) / len(Ytest) * 100
print('El porcentaje de acierto sobre los datos de test es', ac, '%')
#%% Parte 5b: Reducción de dimensionalidad
# Aplicamos PCA y comprobamos la varianza explicada para cada componente
pca = PCA()
WtrainR = pca.fit_transform(Wtrain)
expVar = pca.explained_variance_ratio_
expVarAcum = [expVar[0]]
for i in range(1, len(expVar)):
expVarAcum.append(expVar[i] + expVarAcum[-1])
print("La varianza explicada acumulada es:", np.array(expVarAcum) * 100)
| 36.974273
| 78
| 0.671305
| 4,997
| 33,055
| 4.405243
| 0.093056
| 0.026166
| 0.034525
| 0.022896
| 0.826284
| 0.807568
| 0.781902
| 0.76682
| 0.760187
| 0.75201
| 0
| 0.025733
| 0.171169
| 33,055
| 894
| 79
| 36.974273
| 0.777749
| 0.148692
| 0
| 0.72144
| 0
| 0
| 0.16103
| 0.007384
| 0
| 0
| 0
| 0.001119
| 0
| 1
| 0.018779
| false
| 0
| 0.017214
| 0.00313
| 0.054773
| 0.051643
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
99b72555af1ec8fd32d9e2e95828039325e59a49
| 182
|
py
|
Python
|
zeeguu_core/util/__init__.py
|
simonchristensen1/Zeeguu-Core
|
76f0e4a73676e00e6023ccbb2017210982670da2
|
[
"MIT"
] | 1
|
2018-03-22T12:29:49.000Z
|
2018-03-22T12:29:49.000Z
|
zeeguu_core/util/__init__.py
|
simonchristensen1/Zeeguu-Core
|
76f0e4a73676e00e6023ccbb2017210982670da2
|
[
"MIT"
] | 82
|
2017-12-09T16:15:02.000Z
|
2020-11-12T11:34:09.000Z
|
zeeguu_core/util/__init__.py
|
simonchristensen1/Zeeguu-Core
|
76f0e4a73676e00e6023ccbb2017210982670da2
|
[
"MIT"
] | 9
|
2017-11-25T11:32:05.000Z
|
2020-10-26T15:50:13.000Z
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
from zeeguu_core.util.encoding import JSONSerializable, encode, encode_error
from zeeguu_core.util.hash import text_hash, password_hash
| 30.333333
| 76
| 0.785714
| 26
| 182
| 5.307692
| 0.692308
| 0.144928
| 0.202899
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006135
| 0.104396
| 182
| 5
| 77
| 36.4
| 0.840491
| 0.225275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
99c4e0282647a2089375d952a0b32107dc39a844
| 1,504
|
py
|
Python
|
onpolicy/envs/gridworld/gym_minigrid/envs/__init__.py
|
zoeyuchao/onpolicy-release
|
c2cb64e59c5b1f21cce022db76c378b396fd480e
|
[
"MIT"
] | 1
|
2021-07-04T08:08:30.000Z
|
2021-07-04T08:08:30.000Z
|
onpolicy/envs/gridworld/gym_minigrid/envs/__init__.py
|
zoeyuchao/onpolicy-release
|
c2cb64e59c5b1f21cce022db76c378b396fd480e
|
[
"MIT"
] | 1
|
2021-06-11T15:28:11.000Z
|
2021-06-11T15:28:11.000Z
|
onpolicy/envs/gridworld/gym_minigrid/envs/__init__.py
|
zoeyuchao/onpolicy-release
|
c2cb64e59c5b1f21cce022db76c378b396fd480e
|
[
"MIT"
] | 1
|
2021-05-17T02:00:18.000Z
|
2021-05-17T02:00:18.000Z
|
# from onpolicy.envs.gridworld.gym_minigrid.envs.empty import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.doorkey import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.multiroom import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.fetch import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.gotoobject import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.gotodoor import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.putnear import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.lockedroom import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.keycorridor import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.unlock import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.unlockpickup import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.blockedunlockpickup import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.playground_v0 import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.redbluedoors import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.obstructedmaze import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.memory import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.fourrooms import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.crossing import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.lavagap import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.dynamicobstacles import *
# from onpolicy.envs.gridworld.gym_minigrid.envs.distshift import *
from onpolicy.envs.gridworld.gym_minigrid.envs.human import *
| 65.391304
| 77
| 0.825798
| 199
| 1,504
| 6.125628
| 0.150754
| 0.216571
| 0.288761
| 0.45119
| 0.825267
| 0.825267
| 0.825267
| 0.792453
| 0
| 0
| 0
| 0.000717
| 0.072473
| 1,504
| 22
| 78
| 68.363636
| 0.873118
| 0.930186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 11
|
820228c2b3dc4560ff7fe393c06d5f2587692064
| 78
|
py
|
Python
|
priors/__init__.py
|
holarchy/Holon
|
2a557b300bce10fb2c2ab85a1db4bdfd5df470aa
|
[
"MIT"
] | null | null | null |
priors/__init__.py
|
holarchy/Holon
|
2a557b300bce10fb2c2ab85a1db4bdfd5df470aa
|
[
"MIT"
] | null | null | null |
priors/__init__.py
|
holarchy/Holon
|
2a557b300bce10fb2c2ab85a1db4bdfd5df470aa
|
[
"MIT"
] | null | null | null |
from .priors import make_prior_from_df
from .process_priors import make_priors
| 39
| 39
| 0.884615
| 13
| 78
| 4.923077
| 0.538462
| 0.375
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089744
| 78
| 2
| 39
| 39
| 0.901408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8233c14e46066bd2dd38f93834735f4e146eae80
| 10,792
|
py
|
Python
|
ui/AboutWidget.py
|
penglecn/ChiaTools
|
ca55de3e135c962d46eb821be975444b4654775b
|
[
"Apache-2.0"
] | 6
|
2021-07-01T21:30:44.000Z
|
2022-03-25T01:35:41.000Z
|
ui/AboutWidget.py
|
penglecn/ChiaTools
|
ca55de3e135c962d46eb821be975444b4654775b
|
[
"Apache-2.0"
] | 1
|
2021-07-06T14:05:40.000Z
|
2021-07-06T14:05:40.000Z
|
ui/AboutWidget.py
|
pengbeicn/ChiaTools
|
ca55de3e135c962d46eb821be975444b4654775b
|
[
"Apache-2.0"
] | 3
|
2021-05-07T10:01:18.000Z
|
2021-05-21T08:38:45.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\peng\Desktop\chia-tools\ui\AboutWidget.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_AboutWidget(object):
def setupUi(self, AboutWidget):
AboutWidget.setObjectName("AboutWidget")
AboutWidget.resize(689, 489)
self.verticalLayout = QtWidgets.QVBoxLayout(AboutWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.textBrowser = QtWidgets.QTextBrowser(AboutWidget)
self.textBrowser.setMinimumSize(QtCore.QSize(600, 0))
self.textBrowser.setStyleSheet("QTextEdit {\n"
" background-color: rgba(255, 255, 255, 0);\n"
"}")
self.textBrowser.setFrameShape(QtWidgets.QFrame.NoFrame)
self.textBrowser.setOpenExternalLinks(True)
self.textBrowser.setObjectName("textBrowser")
self.verticalLayout_2.addWidget(self.textBrowser)
self.horizontalLayout.addLayout(self.verticalLayout_2)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(AboutWidget)
QtCore.QMetaObject.connectSlotsByName(AboutWidget)
def retranslateUi(self, AboutWidget):
_translate = QtCore.QCoreApplication.translate
AboutWidget.setWindowTitle(_translate("AboutWidget", "Form"))
self.textBrowser.setHtml(_translate("AboutWidget", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'SimSun\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt; font-weight:600;\">ChiaTools</span><span style=\" font-size:10pt;\">是独立的开源免费软件,旨在帮助Chia矿工们整合各种繁琐的命令行,并提供可视化的操作界面。</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:10pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt;\">如果你在使用过程中发现了问题,或者有更好的建议,欢迎加入我们的群:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt;\">QQ群: 926625265</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt;\">微信群: 添加我的微信号(penglecn)后拉进群</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt;\">或者在线提交Issue:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><a href=\"https://gitee.com/devteamcn/chia-tools/issues\"><span style=\" font-size:10pt; text-decoration: underline; color:#0000ff;\">https://gitee.com/devteamcn/chia-tools/issues</span></a></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:10pt; text-decoration: underline; color:#0000ff;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt;\">开源首页和使用说明</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><a href=\"https://gitee.com/devteamcn/chia-tools\"><span style=\" font-size:10pt; text-decoration: underline; color:#0000ff;\">https://gitee.com/devteamcn/chia-tools</span></a></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt;\">最新版本下载地址</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><a href=\"https://gitee.com/devteamcn/chia-tools/releases\"><span style=\" font-size:10pt; text-decoration: underline; color:#0000ff;\">https://gitee.com/devteamcn/chia-tools/releases</span></a></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:10pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt; font-weight:600;\">多线程P图命令行程序</span><span style=\" font-size:10pt;\"> 0.1.5</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><a href=\"https://github.com/stotiks/chia-plotter/releases\"><span style=\" font-size:10pt; text-decoration: underline; color:#0000ff;\">https://github.com/stotiks/chia-plotter/releases</span></a></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><a href=\"https://github.com/stotiks/chia-plotter/releases\"><span style=\" font-size:10pt; text-decoration: underline; color:#0000ff;\">https://github.com/madMAx43v3r/chia-plotter</span></a></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:10pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt; font-weight:600;\">HPoolOG老挖矿程序</span><span style=\" font-size:10pt;\"> 1.5.3-1</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><a href=\"https://github.com/hpool-dev/chia-miner/releases\"><span style=\" font-size:10pt; text-decoration: underline; color:#0000ff;\">https://github.com/hpool-dev/chia-miner/releases</span></a></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:10pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt; font-weight:600;\">HPoolPP新挖矿程序 </span><span style=\" font-size:10pt;\">1.5.0-2</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><a href=\"https://github.com/hpool-dev/chiapp-miner/releases\"><span style=\" font-size:10pt; text-decoration: underline; color:#0000ff;\">https://github.com/hpool-dev/chiapp-miner/releases</span></a></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:10pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt; font-weight:600;\">火币挖矿程序</span><span style=\" font-size:10pt;\"> 1.0.0</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><a href=\"https://github.com/github-huobipool/HuobiPool-Chia-Miner-release/releases\"><span style=\" font-size:10pt; text-decoration: underline; color:#0000ff;\">https://github.com/github-huobipool/HuobiPool-Chia-Miner-release/releases</span></a></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:10pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt;\">另外,如果你挖矿已经回本,并且该软件帮助你产生了丰厚的收益,请别忘了请作者喝杯咖啡哈!</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:10pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt; font-weight:600; color:#000000;\">感谢你对开源免费软件的支持!</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:10pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt; font-weight:600;\">打赏XCH</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt;\">xch1zdnvg4xpfzm6smadxfckmg8ma3q7sq0hwsamjapjg75ztsm8fclqqk5tuu</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:10pt;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt; font-weight:600;\">打赏RMB</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><img src=\":/img/donate_alipay\" /><span style=\" font-size:10pt;\"> </span><img src=\":/img/donate_weixin\" /></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:10pt;\"><br /></p></body></html>"))
import resources_rc
| 124.045977
| 375
| 0.706727
| 1,637
| 10,792
| 4.651802
| 0.130727
| 0.127643
| 0.064609
| 0.085095
| 0.739724
| 0.736967
| 0.730401
| 0.726986
| 0.71044
| 0.707814
| 0
| 0.04555
| 0.074407
| 10,792
| 86
| 376
| 125.488372
| 0.716788
| 0.02891
| 0
| 0.125
| 1
| 0.083333
| 0.239878
| 0.046314
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.027778
| 0
| 0.069444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
415cd5d07e99c136c58aa3d0c29a0afa95376834
| 1,788
|
py
|
Python
|
scgv/tests/test_multiplier_error.py
|
lchorbadjiev/SCGV
|
7b2fd1fbada7bea49166e37bcb82bd742617fe51
|
[
"MIT"
] | 8
|
2017-03-31T19:55:36.000Z
|
2021-01-22T09:11:40.000Z
|
scgv/tests/test_multiplier_error.py
|
lchorbadjiev/SCGV
|
7b2fd1fbada7bea49166e37bcb82bd742617fe51
|
[
"MIT"
] | null | null | null |
scgv/tests/test_multiplier_error.py
|
lchorbadjiev/SCGV
|
7b2fd1fbada7bea49166e37bcb82bd742617fe51
|
[
"MIT"
] | 2
|
2019-06-11T09:07:01.000Z
|
2020-09-25T02:30:22.000Z
|
'''
Created on Jan 4, 2017
@author: lubo
'''
import numpy as np
import pytest
from scgv.views.sample import SamplesViewer
def test_multiplier(model_fixture):
sample = SamplesViewer(model_fixture)
assert sample is not None
sample_name = 'CJA5294'
assert sample_name in model_fixture.column_labels
m1 = sample.calc_ploidy(sample_name)
sample_index = np.where(model_fixture.column_labels == sample_name)
m2 = model_fixture.multiplier[sample_index]
assert len(m2) == 1
print(m1, m2[0])
assert m1 == pytest.approx(m2[0], abs=1E-6)
def test_all_mutipliers(model_fixture):
sample = SamplesViewer(model_fixture)
for sample_name in model_fixture.column_labels:
m1 = sample.calc_ploidy(sample_name)
sample_index = np.where(model_fixture.column_labels == sample_name)
m2 = model_fixture.multiplier[sample_index]
assert len(m2) == 1
assert m1 == pytest.approx(m2[0], abs=1E-6)
def test_error(model_fixture):
sample = SamplesViewer(model_fixture)
assert sample is not None
sample_name = 'CJA5294'
assert sample_name in model_fixture.column_labels
e1 = sample.calc_error(sample_name)
sample_index = np.where(model_fixture.column_labels == sample_name)
e2 = model_fixture.error[sample_index]
assert len(e2) == 1
print(e1, e2[0])
assert e1 == pytest.approx(e2[0], abs=1E-6)
def test_all_errors(model_fixture):
sample = SamplesViewer(model_fixture)
for sample_name in model_fixture.column_labels:
e1 = sample.calc_error(sample_name)
sample_index = np.where(model_fixture.column_labels == sample_name)
e2 = model_fixture.error[sample_index]
assert len(e2) == 1
assert e1 == pytest.approx(e2[0], abs=1E-6)
| 25.183099
| 75
| 0.704139
| 253
| 1,788
| 4.73913
| 0.205534
| 0.200167
| 0.1201
| 0.160133
| 0.874896
| 0.874896
| 0.874896
| 0.864053
| 0.864053
| 0.81568
| 0
| 0.038408
| 0.199105
| 1,788
| 70
| 76
| 25.542857
| 0.798883
| 0.020694
| 0
| 0.780488
| 0
| 0
| 0.008032
| 0
| 0
| 0
| 0
| 0
| 0.292683
| 1
| 0.097561
| false
| 0
| 0.073171
| 0
| 0.170732
| 0.04878
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
41721690f81f9421260c4b33695f76b8360119c3
| 7,112
|
py
|
Python
|
calcparser.py
|
kentdlee/GenComp
|
084da463b00557b4e0181c1a8c8d1554b4c7b2fb
|
[
"MIT"
] | 2
|
2020-03-03T03:29:56.000Z
|
2020-06-09T03:13:10.000Z
|
calcparser.py
|
kentdlee/GenComp
|
084da463b00557b4e0181c1a8c8d1554b4c7b2fb
|
[
"MIT"
] | null | null | null |
calcparser.py
|
kentdlee/GenComp
|
084da463b00557b4e0181c1a8c8d1554b4c7b2fb
|
[
"MIT"
] | null | null | null |
from calcbackend import *
from genparser import *
class calcParser(Parser):
def __init__(self):
super().__init__({0: LR0State(0,frozenset({LR0Item(0,Production(0,11,[12, 10],'Prog'),0,set()), LR0Item(2,Production(2,12,[],'None'),0,{0, 1, 7, 8, 10}), LR0Item(1,Production(1,12,[12, 13, 9],'None'),0,set())}),{12: 1},False), 1: LR0State(1,frozenset({LR0Item(2,Production(3,13,[14],'print(E)'),0,set()), LR0Item(3,Production(0,11,[12, 10],'Prog'),1,set()), LR0Item(4,Production(5,14,[14, 4, 15],'float(E)-float(T)'),0,set()), LR0Item(3,Production(1,12,[12, 13, 9],'None'),1,set()), LR0Item(3,Production(4,14,[14, 3, 15],'float(E)+float(T)'),0,set()), LR0Item(5,Production(6,14,[15],'T'),0,set()), LR0Item(6,Production(7,15,[15, 5, 16],'float(T)*float(St)'),0,set()), LR0Item(7,Production(8,15,[15, 6, 16],'float(T)/float(St)'),0,set()), LR0Item(8,Production(9,15,[16],'St'),0,set()), LR0Item(9,Production(10,16,[7, 17],'memory.store(float(F))'),0,set()), LR0Item(10,Production(11,16,[17],'F'),0,set()), LR0Item(13,Production(14,17,[8],'memory.recall()'),0,set()), LR0Item(12,Production(13,17,[1, 14, 2],'E'),0,set()), LR0Item(11,Production(12,17,[0],'number'),0,set())}),{14: 2, 10: 3, 13: 4, 15: 5, 16: 6, 7: 7, 17: 8, 8: 9, 1: 10, 0: 11},False), 2: LR0State(2,frozenset({LR0Item(14,Production(3,13,[14],'print(E)'),1,{9}), LR0Item(14,Production(4,14,[14, 3, 15],'float(E)+float(T)'),1,set()), LR0Item(14,Production(5,14,[14, 4, 15],'float(E)-float(T)'),1,set())}),{3: 13, 4: 15},False), 3: LR0State(3,frozenset({LR0Item(14,Production(0,11,[12, 10],'Prog'),2,set())}),{},True), 4: LR0State(4,frozenset({LR0Item(14,Production(1,12,[12, 13, 9],'None'),2,set())}),{9: 23},False), 5: LR0State(5,frozenset({LR0Item(14,Production(6,14,[15],'T'),1,{2, 3, 4, 5, 6, 9}), LR0Item(14,Production(7,15,[15, 5, 16],'float(T)*float(St)'),1,set()), LR0Item(14,Production(8,15,[15, 6, 16],'float(T)/float(St)'),1,set())}),{5: 17, 6: 18},False), 6: LR0State(6,frozenset({LR0Item(14,Production(9,15,[16],'St'),1,{2, 3, 4, 5, 6, 9})}),{},False), 7: 
LR0State(7,frozenset({LR0Item(1,Production(12,17,[0],'number'),0,set()), LR0Item(14,Production(10,16,[7, 17],'memory.store(float(F))'),1,set()), LR0Item(2,Production(13,17,[1, 14, 2],'E'),0,set()), LR0Item(3,Production(14,17,[8],'memory.recall()'),0,set())}),{0: 11, 17: 22, 1: 10, 8: 9},False), 8: LR0State(8,frozenset({LR0Item(14,Production(11,16,[17],'F'),1,{2, 3, 4, 5, 6, 9})}),{},False), 9: LR0State(9,frozenset({LR0Item(14,Production(14,17,[8],'memory.recall()'),1,{2, 3, 4, 5, 6, 9})}),{},False), 10: LR0State(10,frozenset({LR0Item(1,Production(4,14,[14, 3, 15],'float(E)+float(T)'),0,set()), LR0Item(14,Production(13,17,[1, 14, 2],'E'),1,set()), LR0Item(2,Production(5,14,[14, 4, 15],'float(E)-float(T)'),0,set()), LR0Item(4,Production(7,15,[15, 5, 16],'float(T)*float(St)'),0,set()), LR0Item(3,Production(6,14,[15],'T'),0,set()), LR0Item(5,Production(8,15,[15, 6, 16],'float(T)/float(St)'),0,set()), LR0Item(6,Production(9,15,[16],'St'),0,set()), LR0Item(7,Production(10,16,[7, 17],'memory.store(float(F))'),0,set()), LR0Item(8,Production(11,16,[17],'F'),0,set()), LR0Item(9,Production(12,17,[0],'number'),0,set()), LR0Item(11,Production(14,17,[8],'memory.recall()'),0,set()), LR0Item(10,Production(13,17,[1, 14, 2],'E'),0,set())}),{14: 12, 15: 5, 16: 6, 7: 7, 17: 8, 0: 11, 8: 9, 1: 10},False), 11: LR0State(11,frozenset({LR0Item(14,Production(12,17,[0],'number'),1,{2, 3, 4, 5, 6, 9})}),{},False), 12: LR0State(12,frozenset({LR0Item(12,Production(4,14,[14, 3, 15],'float(E)+float(T)'),1,set()), LR0Item(12,Production(13,17,[1, 14, 2],'E'),2,set()), LR0Item(12,Production(5,14,[14, 4, 15],'float(E)-float(T)'),1,set())}),{3: 13, 2: 14, 4: 15},False), 13: LR0State(13,frozenset({LR0Item(1,Production(7,15,[15, 5, 16],'float(T)*float(St)'),0,set()), LR0Item(2,Production(8,15,[15, 6, 16],'float(T)/float(St)'),0,set()), LR0Item(3,Production(4,14,[14, 3, 15],'float(E)+float(T)'),2,set()), LR0Item(4,Production(10,16,[7, 17],'memory.store(float(F))'),0,set()), 
LR0Item(3,Production(9,15,[16],'St'),0,set()), LR0Item(5,Production(11,16,[17],'F'),0,set()), LR0Item(6,Production(12,17,[0],'number'),0,set()), LR0Item(7,Production(13,17,[1, 14, 2],'E'),0,set()), LR0Item(8,Production(14,17,[8],'memory.recall()'),0,set())}),{15: 21, 7: 7, 16: 6, 17: 8, 0: 11, 1: 10, 8: 9},False), 14: LR0State(14,frozenset({LR0Item(3,Production(13,17,[1, 14, 2],'E'),3,{2, 3, 4, 5, 6, 9})}),{},False), 15: LR0State(15,frozenset({LR0Item(1,Production(7,15,[15, 5, 16],'float(T)*float(St)'),0,set()), LR0Item(2,Production(8,15,[15, 6, 16],'float(T)/float(St)'),0,set()), LR0Item(3,Production(5,14,[14, 4, 15],'float(E)-float(T)'),2,set()), LR0Item(4,Production(10,16,[7, 17],'memory.store(float(F))'),0,set()), LR0Item(3,Production(9,15,[16],'St'),0,set()), LR0Item(5,Production(11,16,[17],'F'),0,set()), LR0Item(6,Production(12,17,[0],'number'),0,set()), LR0Item(7,Production(13,17,[1, 14, 2],'E'),0,set()), LR0Item(8,Production(14,17,[8],'memory.recall()'),0,set())}),{15: 16, 7: 7, 16: 6, 17: 8, 0: 11, 1: 10, 8: 9},False), 16: LR0State(16,frozenset({LR0Item(9,Production(7,15,[15, 5, 16],'float(T)*float(St)'),1,set()), LR0Item(9,Production(5,14,[14, 4, 15],'float(E)-float(T)'),3,{2, 3, 4, 5, 6, 9}), LR0Item(9,Production(8,15,[15, 6, 16],'float(T)/float(St)'),1,set())}),{5: 17, 6: 18},False), 17: LR0State(17,frozenset({LR0Item(1,Production(10,16,[7, 17],'memory.store(float(F))'),0,set()), LR0Item(4,Production(13,17,[1, 14, 2],'E'),0,set()), LR0Item(3,Production(7,15,[15, 5, 16],'float(T)*float(St)'),2,set()), LR0Item(2,Production(11,16,[17],'F'),0,set()), LR0Item(3,Production(12,17,[0],'number'),0,set()), LR0Item(5,Production(14,17,[8],'memory.recall()'),0,set())}),{7: 7, 1: 10, 16: 20, 17: 8, 0: 11, 8: 9},False), 18: LR0State(18,frozenset({LR0Item(1,Production(10,16,[7, 17],'memory.store(float(F))'),0,set()), LR0Item(4,Production(13,17,[1, 14, 2],'E'),0,set()), LR0Item(3,Production(8,15,[15, 6, 16],'float(T)/float(St)'),2,set()), 
LR0Item(2,Production(11,16,[17],'F'),0,set()), LR0Item(3,Production(12,17,[0],'number'),0,set()), LR0Item(5,Production(14,17,[8],'memory.recall()'),0,set())}),{7: 7, 1: 10, 16: 19, 17: 8, 0: 11, 8: 9},False), 19: LR0State(19,frozenset({LR0Item(6,Production(8,15,[15, 6, 16],'float(T)/float(St)'),3,{2, 3, 4, 5, 6, 9})}),{},False), 20: LR0State(20,frozenset({LR0Item(6,Production(7,15,[15, 5, 16],'float(T)*float(St)'),3,{2, 3, 4, 5, 6, 9})}),{},False), 21: LR0State(21,frozenset({LR0Item(9,Production(7,15,[15, 5, 16],'float(T)*float(St)'),1,set()), LR0Item(9,Production(4,14,[14, 3, 15],'float(E)+float(T)'),3,{2, 3, 4, 5, 6, 9}), LR0Item(9,Production(8,15,[15, 6, 16],'float(T)/float(St)'),1,set())}),{5: 17, 6: 18},False), 22: LR0State(22,frozenset({LR0Item(4,Production(10,16,[7, 17],'memory.store(float(F))'),2,{2, 3, 4, 5, 6, 9})}),{},False), 23: LR0State(23,frozenset({LR0Item(1,Production(1,12,[12, 13, 9],'None'),3,{0, 1, 7, 8, 10})}),{},False)},['number', "'('", "')'", "'+'", "'-'", "'*'", "'/'", "'S'", "'R'", "';'", 'endoffile', 'Start', 'Prog', 'Stmt', 'E', 'T', 'St', 'F'])
def eval(self,expression):
return eval(expression)
| 711.2
| 6,958
| 0.604331
| 1,360
| 7,112
| 3.154412
| 0.045588
| 0.13986
| 0.117949
| 0.054545
| 0.796737
| 0.758974
| 0.72331
| 0.68648
| 0.612821
| 0.570163
| 0
| 0.186752
| 0.059618
| 7,112
| 9
| 6,959
| 790.222222
| 0.454695
| 0
| 0
| 0
| 0
| 0
| 0.142014
| 0.024747
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.285714
| 0.142857
| 0.857143
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
6b644ee301a53a04062058ddfbb47b61b78cb41e
| 607
|
py
|
Python
|
accounts/models.py
|
mugagambi/retail-system
|
82bd9f243836aadee001fa7f17d1d93441214aa8
|
[
"MIT"
] | 1
|
2019-10-08T13:53:49.000Z
|
2019-10-08T13:53:49.000Z
|
accounts/models.py
|
mugagambi/retail-system
|
82bd9f243836aadee001fa7f17d1d93441214aa8
|
[
"MIT"
] | null | null | null |
accounts/models.py
|
mugagambi/retail-system
|
82bd9f243836aadee001fa7f17d1d93441214aa8
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils import timezone
# Create your models here.
class IncomeAccount(models.Model):
name = models.CharField(max_length=100)
amount = models.DecimalField(max_digits=15, decimal_places=2)
created_at = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.name
class ExpenditureAccount(models.Model):
name = models.CharField(max_length=100)
amount = models.DecimalField(max_digits=15, decimal_places=2)
created_at = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.name
| 27.590909
| 65
| 0.744646
| 78
| 607
| 5.589744
| 0.448718
| 0.045872
| 0.068807
| 0.09633
| 0.738532
| 0.738532
| 0.738532
| 0.738532
| 0.738532
| 0.738532
| 0
| 0.023622
| 0.163097
| 607
| 21
| 66
| 28.904762
| 0.834646
| 0.039539
| 0
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0.142857
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
6bf74296fa6be1435a27cc694b5427076bfca326
| 774
|
py
|
Python
|
core/apps/kubeops_api/models/item_resource.py
|
r4b3rt/KubeOperator
|
1fef19816ada64d8b25f87a5e3356ea5f161d7e5
|
[
"Apache-2.0"
] | 3
|
2020-04-05T04:53:24.000Z
|
2020-04-05T04:53:34.000Z
|
core/apps/kubeops_api/models/item_resource.py
|
r4b3rt/KubeOperator
|
1fef19816ada64d8b25f87a5e3356ea5f161d7e5
|
[
"Apache-2.0"
] | 27
|
2021-05-05T02:51:26.000Z
|
2022-01-04T21:30:21.000Z
|
core/apps/kubeops_api/models/item_resource.py
|
r4b3rt/KubeOperator
|
1fef19816ada64d8b25f87a5e3356ea5f161d7e5
|
[
"Apache-2.0"
] | 1
|
2020-07-06T04:53:51.000Z
|
2020-07-06T04:53:51.000Z
|
import uuid
from django.db import models
__all__ = ["ItemResource"]
class ItemResource(models.Model):
RESOURCE_TYPE_CLUSTER = 'CLUSTER'
RESOURCE_TYPE_HOST = 'HOST'
RESOURCE_TYPE_PLAN = 'PLAN'
RESOURCE_TYPE_BACKUP_STORAGE = 'BACKUP_STORAGE'
RESOURCE_TYPE_STORAGE = 'STORAGE'
RESOURCE_TYPE_CHOICES = (
(RESOURCE_TYPE_CLUSTER,'CLUSTER'),
(RESOURCE_TYPE_HOST,'HOST'),
(RESOURCE_TYPE_PLAN,'PLAN'),
(RESOURCE_TYPE_BACKUP_STORAGE,'BACKUP_STORAGE'),
(RESOURCE_TYPE_STORAGE,'STORAGE')
)
item_id = models.UUIDField(max_length=255, default=uuid.uuid4)
resource_id = models.UUIDField(max_length=255, default=uuid.uuid4)
resource_type = models.CharField(max_length=64,choices=RESOURCE_TYPE_CHOICES)
| 28.666667
| 81
| 0.723514
| 91
| 774
| 5.736264
| 0.307692
| 0.298851
| 0.109195
| 0.099617
| 0.701149
| 0.701149
| 0.701149
| 0.701149
| 0.701149
| 0.701149
| 0
| 0.015625
| 0.173127
| 774
| 26
| 82
| 29.769231
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.108668
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.105263
| 0
| 0.631579
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
d445986ab273203a10192235933257890754de6d
| 11,577
|
py
|
Python
|
src/openprocurement/tender/core/procedure/models/auction.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 10
|
2020-02-18T01:56:21.000Z
|
2022-03-28T00:32:57.000Z
|
src/openprocurement/tender/core/procedure/models/auction.py
|
quintagroup/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 26
|
2018-07-16T09:30:44.000Z
|
2021-02-02T17:51:30.000Z
|
src/openprocurement/tender/core/procedure/models/auction.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 15
|
2019-08-08T10:50:47.000Z
|
2022-02-05T14:13:36.000Z
|
from openprocurement.tender.core.procedure.models.base import Model, ListType, ModelType
from openprocurement.tender.core.procedure.context import get_request, get_tender
from openprocurement.api.models import IsoDateTimeType
from schematics.exceptions import ValidationError
from schematics.types import URLType, MD5Type, FloatType, StringType, BooleanType
from itertools import zip_longest
# set urls
class LotAuctionUrl(Model):
id = MD5Type()
auctionUrl = URLType()
class ParticipationUrl(Model):
id = MD5Type()
participationUrl = URLType() # required ?
class LotValueUrl(Model):
relatedLot = MD5Type()
participationUrl = URLType()
class AuctionUrls(Model):
auctionUrl = URLType() # required ?
bids = ListType(ModelType(ParticipationUrl, required=True))
def validate_bids(self, _, bids):
"""
example input
"bids": [{}, {"participationUrl": "http://..."}, {}]
"""
bid_ids = [b["id"] for b in get_tender().get("bids", "")]
passed_ids = []
for bid, positional_bid_id in zip_longest(bids, bid_ids):
if None in (positional_bid_id, bid):
raise ValidationError("Number of bids did not match the number of tender bids")
if bid.id is None:
bid.id = positional_bid_id
# For now we allow to skip passing id of update object
# Also empty objects {} do not appear in result when we call serialise() method
# there is a way to hack schematics to do so (see def openprocurement.api.Model.to_patch)
# but I don't want to stick to this version of schematics and to any version of it
passed_ids.append(bid.id)
if passed_ids != bid_ids:
raise ValidationError("Auction bids should be identical to the tender bids")
return bids
class BidLotValue(Model):
    # A bid carrying per-lot participation urls for a multi-lot tender.
    id = MD5Type()
    lotValues = ListType(ModelType(LotValueUrl, required=True))  # optional? bid may be cancelled or something..
class LotAuctionUrls(Model):
    """Input model for setting auction/participation urls on a multi-lot tender.

    The request is scoped to one lot via the ``auction_lot_id`` matchdict key
    (POST to /auctions/{lot_id}); data passed for any other lot or lotValue is
    blanked out so only the addressed lot is updated.
    """
    # auctionUrl = URLType()
    lots = ListType(ModelType(LotAuctionUrl, required=True), required=True)
    bids = ListType(ModelType(BidLotValue, required=True), required=True)

    def validate_lots(self, _, lots):
        """
        example input
        "lots": [{}, {"auctionUrl": "http://auction.."}, {}]

        Requires ``auctionUrl`` on the lot addressed by the request's
        ``auction_lot_id``; clears every field except ``id`` on all other lots.
        """
        lot_id = get_request().matchdict.get("auction_lot_id")
        lot_ids = [l["id"] for l in get_tender().get("lots", "")]
        passed_ids = []
        for lot, positional_lot_id in zip_longest(lots, lot_ids):
            # zip_longest pads with None, so None on either side means a length mismatch
            if None in (positional_lot_id, lot):
                raise ValidationError("Number of lots did not match the number of tender lots")
            if lot.id is None:
                lot.id = positional_lot_id
            # For now we allow to skip passing id of update object
            # Also empty objects {} do not appear in result when we call serialise() method
            # there is a way to hack schematics to do so (see def openprocurement.api.Model.to_patch)
            # but I don't want to stick to this version of schematics and to any version of it
            if lot.id == lot_id:
                if lot.auctionUrl is None:
                    raise ValidationError("Auction url required")
            else:  # post to /auctions/{lot_id} updates only related lots
                for f in lot:
                    if f != "id":
                        lot[f] = None
            passed_ids.append(lot.id)
        # passed ids must reproduce the tender lot ids exactly (same order, same count)
        if passed_ids != lot_ids:
            raise ValidationError("Auction lots should be identical to the tender lots")
        return lots

    def validate_bids(self, _, bids):
        """
        example input
        "bids": [{}, {"lotValues": [{}, {"participationUrl": "http://..."}, {}]}, {}]

        Matches bids (and each bid's lotValues) positionally against the tender;
        requires ``participationUrl`` on the lotValue related to the addressed
        lot and blanks all other lotValues' fields except ``relatedLot``.
        """
        lot_id = get_request().matchdict.get("auction_lot_id")
        bid_ids = [b["id"] for b in get_tender().get("bids", "")]
        tender_bids = {b["id"]: b for b in get_tender().get("bids", "")}
        passed_ids = []
        for bid, positional_bid_id in zip_longest(bids, bid_ids):
            if None in (positional_bid_id, bid):
                raise ValidationError("Number of auction results did not match the number of tender bids")
            if bid.id is None:
                bid.id = positional_bid_id
            elif bid.id not in tender_bids:
                # an explicit id must belong to a real tender bid
                raise ValidationError("Auction bids should be identical to the tender bids")
            # For now we allow to skip passing id of update object
            # Also empty objects {} do not appear in result when we call serialise() method
            # there is a way to hack schematics to do so (see def openprocurement.api.Model.to_patch)
            # but I don't want to stick to this version of schematics and to any version of it
            passed_ids.append(bid.id)
            # lotValues check ---
            if bid.lotValues:
                passed_related_lots = []
                tender_related_lots = [v["relatedLot"] for v in tender_bids[bid.id]["lotValues"]]
                for value, positional_related_lot in zip_longest(bid.lotValues, tender_related_lots):
                    if positional_related_lot is None:
                        # more lotValues passed than the tender bid has
                        raise ValidationError(
                            "Number of lots of auction results did not match the number of tender lots")
                    if value is None:  # passed list actually can be shorter
                        continue
                    if value.relatedLot is None:
                        value.relatedLot = positional_related_lot
                    if value.relatedLot == lot_id:
                        if value.participationUrl is None:
                            raise ValidationError("Auction participation url required")
                    else:  # post to /auctions/{lot_id} updates only related lotValues
                        for f in value:
                            if f != "relatedLot":
                                value[f] = None
                    passed_related_lots.append(value.relatedLot)
                if passed_related_lots != tender_related_lots[:len(passed_related_lots)]:  # passed can be shorter
                    raise ValidationError("Auction bid.lotValues should be identical to the tender bid.lotValues")
            # -- lotValues check
        if passed_ids != bid_ids:
            raise ValidationError("Auction bids should be identical to the tender bids")
        return bids
# auction results
class ValueResult(Model):
    # Bid value reported back by the auction (amount + timestamp).
    amount = FloatType(min_value=0)
    date = IsoDateTimeType()
    # these two required by tests and maybe "old" auctions TODO: rm them after new auctions
    currency = StringType()
    valueAddedTaxIncluded = BooleanType()
class WeightedValueResult(Model):
    # Weighted bid value reported back by the auction; mirrors ValueResult.
    amount = FloatType(min_value=0)
    date = IsoDateTimeType()
    # these two required by tests and maybe "old" auctions TODO: rm them after new auctions
    currency = StringType()
    valueAddedTaxIncluded = BooleanType()
class BidResult(Model):
    # Auction result for one bid of a tender without lots.
    id = MD5Type()
    value = ModelType(ValueResult)
    weightedValue = ModelType(WeightedValueResult)
    date = IsoDateTimeType()
class AuctionResults(Model):
    """Input model for posting auction results on a tender without lots.

    ``bids`` must line up one-to-one (by position) with the tender's bids;
    missing ids are filled in positionally.
    """

    bids = ListType(ModelType(BidResult, required=True))

    def validate_bids(self, _, bids):
        """
        example input
        "bids": [{}, {"value": 1, "date": "2020-..."}, {}]
        """
        tender_bid_ids = [tender_bid["id"] for tender_bid in get_tender().get("bids", "")]
        seen_ids = []
        for passed, expected_id in zip_longest(bids, tender_bid_ids):
            # zip_longest pads the shorter side with None, so a None on
            # either side means the list lengths differ
            if None in (expected_id, passed):
                raise ValidationError("Number of auction results did not match the number of tender bids")
            if passed.id is None:
                # ids may be omitted in the payload; assign them positionally.
                # Note: empty objects {} do not appear when serialise() is called;
                # working around that would mean pinning a patched schematics version.
                passed.id = expected_id
            seen_ids.append(passed.id)
        if seen_ids != tender_bid_ids:
            raise ValidationError("Auction bids should be identical to the tender bids")
        return bids
# auction lot results
class LotResult(Model):
    # Auction result for a single lotValue of a bid (multi-lot tender).
    relatedLot = MD5Type()
    value = ModelType(ValueResult)
    weightedValue = ModelType(WeightedValueResult)
    date = IsoDateTimeType()
class BidLotResult(Model):
    # A bid carrying per-lot auction results for a multi-lot tender.
    id = MD5Type()
    lotValues = ListType(ModelType(LotResult, required=True))
class AuctionLotResults(Model):
    """Input model for posting auction results for one lot of a multi-lot tender.

    The request is scoped to one lot via the ``auction_lot_id`` matchdict key;
    lotValues related to any other lot are blanked out so only the addressed
    lot's results are updated.
    """
    bids = ListType(ModelType(BidLotResult, required=True), required=True)

    def validate_bids(self, _, bids):
        """
        example input
        "bids": [{}, {"lotValues": [{}, {"value": 23, "date": "..."}, {}]}, {}]

        Matches bids (and each bid's lotValues) positionally against the
        tender; clears every field except ``relatedLot`` on lotValues that do
        not belong to the addressed lot.
        """
        lot_id = get_request().matchdict.get("auction_lot_id")
        bid_ids = [b["id"] for b in get_tender().get("bids", "")]
        tender_bids = {b["id"]: b for b in get_tender().get("bids", "")}
        passed_ids = []
        for bid, positional_bid_id in zip_longest(bids, bid_ids):
            # zip_longest pads with None, so None on either side means a length mismatch
            if None in (positional_bid_id, bid):
                raise ValidationError("Number of auction results did not match the number of tender bids")
            if bid.id is None:
                bid.id = positional_bid_id
            elif bid.id not in tender_bids:
                # an explicit id must belong to a real tender bid
                raise ValidationError("Auction bids should be identical to the tender bids")
            # For now we allow to skip passing id of update object
            # Also empty objects {} do not appear in result when we call serialise() method
            # there is a way to hack schematics to do so (see def openprocurement.api.Model.to_patch)
            # but I don't want to stick to this version of schematics and to any version of it
            passed_ids.append(bid.id)
            # lotValues check ---
            if bid.lotValues:
                passed_related_lots = []
                tender_related_lots = [v["relatedLot"] for v in tender_bids[bid.id]["lotValues"]]
                for value, positional_related_lot in zip_longest(bid.lotValues, tender_related_lots):
                    if positional_related_lot is None:
                        # more lotValues passed than the tender bid has
                        raise ValidationError(
                            "Number of lots of auction results did not match the number of tender lots")
                    if value is None:  # passed list actually can be shorter
                        continue
                    if value.relatedLot is None:
                        value.relatedLot = positional_related_lot
                    passed_related_lots.append(value.relatedLot)
                    # patch to /auctions/{lot_id} updates only related lotValues
                    if value.relatedLot != lot_id:
                        for f in value:
                            if f != "relatedLot":
                                value[f] = None
                if passed_related_lots != tender_related_lots[:len(passed_related_lots)]:  # passed can be shorter
                    raise ValidationError("Auction bid.lotValues should be identical to the tender bid.lotValues")
            # -- lotValues check
        if passed_ids != bid_ids:
            raise ValidationError("Auction bids should be identical to the tender bids")
        return bids
| 43.197761
| 114
| 0.60171
| 1,389
| 11,577
| 4.901368
| 0.11951
| 0.020564
| 0.026439
| 0.025118
| 0.79671
| 0.761604
| 0.727673
| 0.727673
| 0.720182
| 0.701087
| 0
| 0.002136
| 0.31243
| 11,577
| 267
| 115
| 43.359551
| 0.853141
| 0.220264
| 0
| 0.710843
| 0
| 0
| 0.129303
| 0
| 0
| 0
| 0
| 0.003745
| 0
| 1
| 0.03012
| false
| 0.126506
| 0.036145
| 0
| 0.36747
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
d44e2233c193afbe9a42b253be5890048de0cc5d
| 1,800
|
py
|
Python
|
algorithm7.py
|
kipkat/AMSTK7
|
3c82053e8033eaac265d99fe961d2c3db3ea3c71
|
[
"MIT"
] | null | null | null |
algorithm7.py
|
kipkat/AMSTK7
|
3c82053e8033eaac265d99fe961d2c3db3ea3c71
|
[
"MIT"
] | null | null | null |
algorithm7.py
|
kipkat/AMSTK7
|
3c82053e8033eaac265d99fe961d2c3db3ea3c71
|
[
"MIT"
] | null | null | null |
import random as nature
def amstk7(s: str, chars: int = 64):
    """Deterministically scramble *s* into a pseudo-random text digest.

    The input is expanded character-by-character into letters/digits, the
    expansion seeds the ``random`` module (imported as ``nature``), and five
    shuffle-and-append rounds grow the text before the last *chars*
    characters are returned.  Same input always yields the same output.
    NOTE(review): built on the ``random`` module, so not suitable for
    security-sensitive hashing.
    """
    length = len(s)
    seed = length
    digest = ''
    # First expansion pass: two derived characters per input character.
    for ch in s:
        code = ord(ch)
        seed += code
        digest += chr(code * length * len(str(code)) % 26 + 65)
        digest += str((code * 3 * length + 42 * len(str(code))) % 10 + len(digest))
    # Second pass walks a snapshot of the first-pass output while growing it.
    for ch in tuple(digest):
        code = ord(ch)
        digest += chr(code * length * seed % 10 * len(str(code)) % 26 + 65)
        digest += str((code * 3 * length + 42 * len(str(code))) % 10 + len(digest) + seed % 10)
        digest += chr(len(digest) % 26 + 65)
    # Fold both the expanded text and the raw input into the RNG seed.
    seed *= int.from_bytes(digest.encode(), 'big')
    seed += int.from_bytes(s.encode(), 'big')
    nature.seed(seed)
    scrambled = list(digest)
    nature.shuffle(scrambled)
    digest = ''.join(scrambled)
    # Five doubling rounds: shuffle a copy of the text and append it.
    for _ in range(5):
        scrambled = list(digest)
        nature.shuffle(scrambled)
        digest += ''.join(scrambled)
    return digest[-chars:]
def seed_amstk7(s: str, seed2: int, chars: int = 64):
    """Variant of ``amstk7`` that mixes an extra integer *seed2* into the RNG seed.

    Identical expansion to ``amstk7``; only the ``nature.seed`` call differs
    (``seed + seed2``), so different *seed2* values reshuffle the same
    expanded text differently.  Deterministic for a given (s, seed2, chars).
    NOTE(review): built on the ``random`` module, so not suitable for
    security-sensitive hashing.
    """
    length = len(s)
    seed = length
    digest = ''
    # First expansion pass: two derived characters per input character.
    for ch in s:
        code = ord(ch)
        seed += code
        digest += chr(code * length * len(str(code)) % 26 + 65)
        digest += str((code * 3 * length + 42 * len(str(code))) % 10 + len(digest))
    # Second pass walks a snapshot of the first-pass output while growing it.
    for ch in tuple(digest):
        code = ord(ch)
        digest += chr(code * length * seed % 10 * len(str(code)) % 26 + 65)
        digest += str((code * 3 * length + 42 * len(str(code))) % 10 + len(digest) + seed % 10)
        digest += chr(len(digest) % 26 + 65)
    # Fold the expanded text, the raw input, and the caller's seed into the RNG.
    seed *= int.from_bytes(digest.encode(), 'big')
    seed += int.from_bytes(s.encode(), 'big')
    nature.seed(seed + seed2)
    scrambled = list(digest)
    nature.shuffle(scrambled)
    digest = ''.join(scrambled)
    # Five doubling rounds: shuffle a copy of the text and append it.
    for _ in range(5):
        scrambled = list(digest)
        nature.shuffle(scrambled)
        digest += ''.join(scrambled)
    return digest[-chars:]
def repeat_amstk7(s: str, repeats: int, chars: int = 64):
    """Feed *s* through ``amstk7`` *repeats* times and return the final digest."""
    result = s
    for _ in range(repeats):
        result = amstk7(result, chars)
    return result
| 29.508197
| 80
| 0.488333
| 294
| 1,800
| 2.928571
| 0.139456
| 0.083624
| 0.083624
| 0.03252
| 0.840883
| 0.840883
| 0.836237
| 0.836237
| 0.836237
| 0.836237
| 0
| 0.056106
| 0.326667
| 1,800
| 60
| 81
| 30
| 0.65429
| 0
| 0
| 0.8
| 0
| 0
| 0.006667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054545
| false
| 0
| 0.018182
| 0
| 0.127273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d46dde59c40b21daf8053344583db83375c83c90
| 294
|
py
|
Python
|
HOZO/hozo/__init__.py
|
jsgubin/HOZOG
|
cae6ac386d1b43c70d269e47e10ba4a4ac7aed4a
|
[
"MIT"
] | 4
|
2021-04-19T21:01:56.000Z
|
2021-09-05T06:54:47.000Z
|
HOZO/hozo/__init__.py
|
jsgubin/HOZOG
|
cae6ac386d1b43c70d269e47e10ba4a4ac7aed4a
|
[
"MIT"
] | 1
|
2022-01-24T21:32:42.000Z
|
2022-01-24T21:32:42.000Z
|
HOZO/hozo/__init__.py
|
jsgubin/HOZOG
|
cae6ac386d1b43c70d269e47e10ba4a4ac7aed4a
|
[
"MIT"
] | null | null | null |
from hozo.logistic import *
from hozo.utils import *
from hozo.hozo import *
# from hozo.datasets import Dataset, Datasets
from hozo.models import *
from hozo.data_hyper_cleaning_ho import *
from hozo.data_hyper_cleaning_bo import *
from hozo.ZOG import *
from hozo.data_cleaning_model import *
| 32.666667
| 45
| 0.809524
| 46
| 294
| 5
| 0.326087
| 0.313043
| 0.426087
| 0.234783
| 0.269565
| 0.269565
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12585
| 294
| 9
| 46
| 32.666667
| 0.894942
| 0.146259
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d476076839bc5f4132bbb9b8507dafd40035d591
| 116
|
py
|
Python
|
policy_driven_attack/policy/imagenet/__init__.py
|
machanic/TangentAttack
|
17c1a8e93f9bbd03e209e8650631af744a0ff6b8
|
[
"Apache-2.0"
] | 4
|
2021-11-12T04:06:32.000Z
|
2022-01-27T09:01:41.000Z
|
policy_driven_attack/policy/imagenet/__init__.py
|
machanic/TangentAttack
|
17c1a8e93f9bbd03e209e8650631af744a0ff6b8
|
[
"Apache-2.0"
] | 1
|
2022-02-22T14:00:59.000Z
|
2022-02-25T08:57:29.000Z
|
policy_driven_attack/policy/imagenet/__init__.py
|
machanic/TangentAttack
|
17c1a8e93f9bbd03e209e8650631af744a0ff6b8
|
[
"Apache-2.0"
] | null | null | null |
from policy_driven_attack.policy.imagenet.empty import *
from policy_driven_attack.policy.imagenet.vgg_inv import *
| 38.666667
| 58
| 0.862069
| 17
| 116
| 5.588235
| 0.529412
| 0.210526
| 0.336842
| 0.463158
| 0.757895
| 0.757895
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 116
| 2
| 59
| 58
| 0.87963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
d477f7bb49ff7acd2594eb383b9133fc9dacd4e6
| 178
|
py
|
Python
|
Configuration/StandardSequences/python/FrontierConditions_GlobalTag_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
Configuration/StandardSequences/python/FrontierConditions_GlobalTag_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
Configuration/StandardSequences/python/FrontierConditions_GlobalTag_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.CondDBESSource_cff import *
from Configuration.StandardSequences.AdditionalConditions_cff import *
| 35.6
| 70
| 0.882022
| 18
| 178
| 8.611111
| 0.666667
| 0.219355
| 0.43871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073034
| 178
| 4
| 71
| 44.5
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5ceb2c5bf2d4c18dd016e1dfa1274031ff035037
| 54
|
py
|
Python
|
cv19gm/models/__init__.py
|
DLab/covid19geomodeller
|
a3a9eedf064078b21be0928ee41b41c902938eff
|
[
"MIT"
] | null | null | null |
cv19gm/models/__init__.py
|
DLab/covid19geomodeller
|
a3a9eedf064078b21be0928ee41b41c902938eff
|
[
"MIT"
] | null | null | null |
cv19gm/models/__init__.py
|
DLab/covid19geomodeller
|
a3a9eedf064078b21be0928ee41b41c902938eff
|
[
"MIT"
] | null | null | null |
import cv19gm.models.seir
import cv19gm.models.seirhvd
| 27
| 28
| 0.87037
| 8
| 54
| 5.875
| 0.625
| 0.510638
| 0.765957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 0.055556
| 54
| 2
| 28
| 27
| 0.843137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d8fe5f4663457bac94156103b7110322cbe61db7
| 101
|
py
|
Python
|
src/kidmaya/core/__init__.py
|
KidKaboom/Kid-Maya-2022
|
0daec301a63438d681cc4c3a5df6d4efdc70daef
|
[
"MIT"
] | null | null | null |
src/kidmaya/core/__init__.py
|
KidKaboom/Kid-Maya-2022
|
0daec301a63438d681cc4c3a5df6d4efdc70daef
|
[
"MIT"
] | null | null | null |
src/kidmaya/core/__init__.py
|
KidKaboom/Kid-Maya-2022
|
0daec301a63438d681cc4c3a5df6d4efdc70daef
|
[
"MIT"
] | null | null | null |
# :coding: utf-8
from kidmaya.core.kmcommand import KMCommand
from kidmaya.core.kmtool import KMTool
| 25.25
| 44
| 0.811881
| 15
| 101
| 5.466667
| 0.6
| 0.268293
| 0.365854
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011111
| 0.108911
| 101
| 3
| 45
| 33.666667
| 0.9
| 0.138614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
996d710cba1b717ed5a682ad2c0ddbb9b26bc8c9
| 28,336
|
py
|
Python
|
skidl/libs/digital-audio_sklib.py
|
arjenroodselaar/skidl
|
0bf801bd3b74e6ef94bd9aa1b68eef756b568276
|
[
"MIT"
] | 700
|
2016-08-16T21:12:50.000Z
|
2021-10-10T02:15:18.000Z
|
skidl/libs/digital-audio_sklib.py
|
0dvictor/skidl
|
458709a10b28a864d25ae2c2b44c6103d4ddb291
|
[
"MIT"
] | 118
|
2016-08-16T20:51:05.000Z
|
2021-10-10T08:07:18.000Z
|
skidl/libs/digital-audio_sklib.py
|
0dvictor/skidl
|
458709a10b28a864d25ae2c2b44c6103d4ddb291
|
[
"MIT"
] | 94
|
2016-08-25T14:02:28.000Z
|
2021-09-12T05:17:08.000Z
|
from skidl import SKIDL, TEMPLATE, Part, Pin, SchLib
SKIDL_lib_version = '0.0.1'
digital_audio = SchLib(tool=SKIDL).add_parts(*[
Part(name='AK5392VS',dest=TEMPLATE,tool=SKIDL,keywords='24bit Sigma Delta Audio ADC 2ch',description='AK5392-VS, Enhanced Audio ADC, 2 channels Sigma Delta, 24bit, SO28',ref_prefix='U',num_units=1,fplist=['SO*'],do_erc=True,pins=[
Pin(num='1',name='VREFL',func=Pin.OUTPUT,do_erc=True),
Pin(num='2',name='GNDL',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='VCOML',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='AINL+',do_erc=True),
Pin(num='5',name='AINL-',do_erc=True),
Pin(num='6',name='ZCAL',do_erc=True),
Pin(num='7',name='VD',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='DGND',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='CAL',func=Pin.OUTPUT,do_erc=True),
Pin(num='10',name='~RST~',do_erc=True),
Pin(num='20',name='TEST',do_erc=True),
Pin(num='11',name='SMODE2',do_erc=True),
Pin(num='21',name='BGND',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='SMODE1',do_erc=True),
Pin(num='22',name='AGND',func=Pin.PWRIN,do_erc=True),
Pin(num='13',name='LRCK',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='VA',func=Pin.PWRIN,do_erc=True),
Pin(num='14',name='SCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='24',name='AINR-',do_erc=True),
Pin(num='15',name='SDATA',func=Pin.OUTPUT,do_erc=True),
Pin(num='25',name='AINR+',do_erc=True),
Pin(num='16',name='FSYNC',func=Pin.BIDIR,do_erc=True),
Pin(num='26',name='VCOMR',func=Pin.OUTPUT,do_erc=True),
Pin(num='17',name='CLK',do_erc=True),
Pin(num='27',name='GNDR',func=Pin.OUTPUT,do_erc=True),
Pin(num='18',name='CMODE',do_erc=True),
Pin(num='28',name='VREFR',func=Pin.OUTPUT,do_erc=True),
Pin(num='19',name='HPFE',do_erc=True)]),
Part(name='AK5393VS',dest=TEMPLATE,tool=SKIDL,keywords='96kHz 24bit Sigma Delta Audio ADC 2ch',description='Enhanced Audio ADC, 2 channels Sigma Delta, 24bit 96kHz, SO28',ref_prefix='U',num_units=1,fplist=['SO*'],do_erc=True,pins=[
Pin(num='1',name='VREFL',func=Pin.OUTPUT,do_erc=True),
Pin(num='2',name='GNDL',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='VCOML',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='AINL+',do_erc=True),
Pin(num='5',name='AINL-',do_erc=True),
Pin(num='6',name='ZCAL',do_erc=True),
Pin(num='7',name='VD',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='DGND',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='CAL',func=Pin.OUTPUT,do_erc=True),
Pin(num='10',name='~RST~',do_erc=True),
Pin(num='20',name='TEST',do_erc=True),
Pin(num='11',name='SMODE2',do_erc=True),
Pin(num='21',name='BGND',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='SMODE1',do_erc=True),
Pin(num='22',name='AGND',func=Pin.PWRIN,do_erc=True),
Pin(num='13',name='LRCK',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='VA',func=Pin.PWRIN,do_erc=True),
Pin(num='14',name='SCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='24',name='AINR-',do_erc=True),
Pin(num='15',name='SDATA',func=Pin.OUTPUT,do_erc=True),
Pin(num='25',name='AINR+',do_erc=True),
Pin(num='16',name='FSYNC',func=Pin.BIDIR,do_erc=True),
Pin(num='26',name='VCOMR',func=Pin.OUTPUT,do_erc=True),
Pin(num='17',name='MCLK',do_erc=True),
Pin(num='27',name='GNDR',func=Pin.OUTPUT,do_erc=True),
Pin(num='18',name='DFS',do_erc=True),
Pin(num='28',name='VREFR',func=Pin.OUTPUT,do_erc=True),
Pin(num='19',name='HPFE',do_erc=True)]),
Part(name='AK5394AVS',dest=TEMPLATE,tool=SKIDL,keywords='192kHz 24bit Sigma Delta Audio ADC 2ch',description='Super High Performance Audio ADC, 2 channels Sigma Delta, 24bit 192kHz, SO28',ref_prefix='U',num_units=1,fplist=['SO*'],do_erc=True,pins=[
Pin(num='1',name='VREFL+',func=Pin.OUTPUT,do_erc=True),
Pin(num='2',name='VREFL-',func=Pin.OUTPUT,do_erc=True),
Pin(num='3',name='VCOML',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='AINL+',do_erc=True),
Pin(num='5',name='AINL-',do_erc=True),
Pin(num='6',name='ZCAL',do_erc=True),
Pin(num='7',name='VD',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='DGND',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='CAL',func=Pin.OUTPUT,do_erc=True),
Pin(num='10',name='~RST~',do_erc=True),
Pin(num='20',name='DFS1',do_erc=True),
Pin(num='11',name='SMODE2',do_erc=True),
Pin(num='21',name='BGND',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='SMODE1',do_erc=True),
Pin(num='22',name='AGND',func=Pin.PWRIN,do_erc=True),
Pin(num='13',name='LRCK',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='VA',func=Pin.PWRIN,do_erc=True),
Pin(num='14',name='SCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='24',name='AINR-',do_erc=True),
Pin(num='15',name='SDATA',func=Pin.OUTPUT,do_erc=True),
Pin(num='25',name='AINR+',do_erc=True),
Pin(num='16',name='FSYNC',func=Pin.BIDIR,do_erc=True),
Pin(num='26',name='VCOMR',func=Pin.OUTPUT,do_erc=True),
Pin(num='17',name='MCLK',do_erc=True),
Pin(num='27',name='VREFR-',func=Pin.OUTPUT,do_erc=True),
Pin(num='18',name='DFS0',do_erc=True),
Pin(num='28',name='VREFR+',func=Pin.OUTPUT,do_erc=True),
Pin(num='19',name='HPFE',do_erc=True)]),
Part(name='CS4245',dest=TEMPLATE,tool=SKIDL,keywords='CS4245 stereo audio codec',description='Stereo Audio CODEC, 104 dB, 24-Bit, 192 kHz',ref_prefix='U',num_units=1,do_erc=True,pins=[
Pin(num='1',name='SDA/CDOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='SCL/CCLK',do_erc=True),
Pin(num='3',name='AD0/~CS',do_erc=True),
Pin(num='4',name='AD1/CDIN',do_erc=True),
Pin(num='5',name='VLC',do_erc=True),
Pin(num='6',name='~RESET',do_erc=True),
Pin(num='7',name='AIN3A',do_erc=True),
Pin(num='8',name='AIN3B',do_erc=True),
Pin(num='9',name='AIN2A',do_erc=True),
Pin(num='10',name='AIN2B',do_erc=True),
Pin(num='20',name='FILT2+',func=Pin.OUTPUT,do_erc=True),
Pin(num='30',name='VA',func=Pin.PWRIN,do_erc=True),
Pin(num='40',name='MCLK2',do_erc=True),
Pin(num='11',name='AIN1A',do_erc=True),
Pin(num='21',name='AIN4A/MICIN1',do_erc=True),
Pin(num='31',name='AGND',func=Pin.PWRIN,do_erc=True),
Pin(num='41',name='SDOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='12',name='AIN1B',do_erc=True),
Pin(num='22',name='AIN4B/MICIN2',do_erc=True),
Pin(num='32',name='AGND',func=Pin.PWRIN,do_erc=True),
Pin(num='42',name='SCLK1',func=Pin.BIDIR,do_erc=True),
Pin(num='13',name='AGND',func=Pin.PWRIN,do_erc=True),
Pin(num='23',name='AIN5A',do_erc=True),
Pin(num='33',name='AOUTA',func=Pin.OUTPUT,do_erc=True),
Pin(num='43',name='LRCK1',func=Pin.BIDIR,do_erc=True),
Pin(num='14',name='VA',func=Pin.PWRIN,do_erc=True),
Pin(num='24',name='AIN5B',do_erc=True),
Pin(num='34',name='AOUTB',func=Pin.OUTPUT,do_erc=True),
Pin(num='44',name='MCLK1',do_erc=True),
Pin(num='15',name='AFILTA',func=Pin.OUTPUT,do_erc=True),
Pin(num='25',name='MICBIAS',func=Pin.OUTPUT,do_erc=True),
Pin(num='35',name='~MUTEC',func=Pin.OUTPUT,do_erc=True),
Pin(num='45',name='DGND',func=Pin.PWRIN,do_erc=True),
Pin(num='16',name='AFILTB',func=Pin.OUTPUT,do_erc=True),
Pin(num='26',name='AIN6A',do_erc=True),
Pin(num='36',name='VLS',func=Pin.PWRIN,do_erc=True),
Pin(num='46',name='VD',func=Pin.PWRIN,do_erc=True),
Pin(num='17',name='VQ1',func=Pin.OUTPUT,do_erc=True),
Pin(num='27',name='AIN6B',do_erc=True),
Pin(num='37',name='SDIN',do_erc=True),
Pin(num='47',name='INT',func=Pin.OUTPUT,do_erc=True),
Pin(num='18',name='VQ2',func=Pin.OUTPUT,do_erc=True),
Pin(num='28',name='AUXOUTA',func=Pin.OUTPUT,do_erc=True),
Pin(num='38',name='SCLK2',func=Pin.BIDIR,do_erc=True),
Pin(num='48',name='OVFL',func=Pin.OUTPUT,do_erc=True),
Pin(num='19',name='FILT1+',func=Pin.OUTPUT,do_erc=True),
Pin(num='29',name='AUXOUTB',func=Pin.OUTPUT,do_erc=True),
Pin(num='39',name='LRCK2',func=Pin.BIDIR,do_erc=True)]),
Part(name='CS43L21',dest=TEMPLATE,tool=SKIDL,keywords='stereo audio dac',description='Stereo Audio DAC, 24-bit, 96 kHz, 98 dB',ref_prefix='U',num_units=1,do_erc=True,pins=[
Pin(num='1',name='LRCK',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='SDA/MCLKDIV2',func=Pin.BIDIR,do_erc=True),
Pin(num='3',name='SCL/CCLK/I2S/~LJ',do_erc=True),
Pin(num='4',name='AD0/~CS~/DEM',do_erc=True),
Pin(num='5',name='VA_HP',do_erc=True),
Pin(num='6',name='FLYP',do_erc=True),
Pin(num='7',name='GND_HP',do_erc=True),
Pin(num='8',name='FLYN',do_erc=True),
Pin(num='9',name='VSS_HP',func=Pin.OUTPUT,do_erc=True),
Pin(num='10',name='AOUTB',func=Pin.OUTPUT,do_erc=True),
Pin(num='30',name='MCLK',do_erc=True),
Pin(num='11',name='AOUTA',func=Pin.OUTPUT,do_erc=True),
Pin(num='31',name='SCLK',do_erc=True),
Pin(num='12',name='VA',do_erc=True),
Pin(num='32',name='SDIN',do_erc=True),
Pin(num='13',name='AGND',do_erc=True),
Pin(num='14',name='FILT+',func=Pin.OUTPUT,do_erc=True),
Pin(num='15',name='VQ',func=Pin.OUTPUT,do_erc=True),
Pin(num='25',name='~RESET',do_erc=True),
Pin(num='26',name='VL',do_erc=True),
Pin(num='27',name='VD',func=Pin.PWRIN,do_erc=True),
Pin(num='28',name='DGND',func=Pin.PWRIN,do_erc=True),
Pin(num='29',name='TESTO/M/~S',do_erc=True)]),
Part(name='CS5361',dest=TEMPLATE,tool=SKIDL,keywords='stereo audio adc',description='Stereo Audio ADC, 24 bits, 192 kHz, 114 dB',ref_prefix='U',num_units=1,do_erc=True,pins=[
Pin(num='1',name='RST',do_erc=True),
Pin(num='2',name='M/~S',do_erc=True),
Pin(num='3',name='LRCK',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='SCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='MCLK',do_erc=True),
Pin(num='6',name='VD',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='VL',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='SDOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='10',name='MDIV',do_erc=True),
Pin(num='20',name='AINR-',do_erc=True),
Pin(num='11',name='~HPF',do_erc=True),
Pin(num='21',name='AINR+',do_erc=True),
Pin(num='12',name='I2S/~LJ',do_erc=True),
Pin(num='22',name='VQ',func=Pin.PWROUT,do_erc=True),
Pin(num='13',name='M0',do_erc=True),
Pin(num='23',name='REFGND',func=Pin.PWRIN,do_erc=True),
Pin(num='14',name='M1',do_erc=True),
Pin(num='24',name='FILT+',func=Pin.PWROUT,do_erc=True),
Pin(num='15',name='~OVFL',do_erc=True),
Pin(num='16',name='AINL+',do_erc=True),
Pin(num='17',name='AINL-',do_erc=True),
Pin(num='18',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='19',name='VA',func=Pin.PWRIN,do_erc=True)]),
Part(name='CS8406',dest=TEMPLATE,tool=SKIDL,keywords='digital audio interface transmitter spdif',description='192 kHz Digital Audio Interface Transmitter (SOIC-28)',ref_prefix='U',num_units=1,do_erc=True,pins=[
Pin(num='6',name='VD',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='~RST',do_erc=True),
Pin(num='21',name='OMCK',do_erc=True),
Pin(num='12',name='ILRCK',func=Pin.BIDIR,do_erc=True),
Pin(num='22',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='13',name='ISCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='VL',func=Pin.PWRIN,do_erc=True),
Pin(num='14',name='SDIN',do_erc=True),
Pin(num='24',name='H/~S',do_erc=True),
Pin(num='25',name='TXN',func=Pin.OUTPUT,do_erc=True),
Pin(num='26',name='TXP',func=Pin.OUTPUT,do_erc=True),
Pin(num='1',name='COPY/C',do_erc=True),
Pin(num='3',name='~EMPH',do_erc=True),
Pin(num='4',name='SFMT0',do_erc=True),
Pin(num='5',name='SFMT1',do_erc=True),
Pin(num='10',name='APMS',do_erc=True),
Pin(num='20',name='HWCK0',do_erc=True),
Pin(num='11',name='TCBLD',do_erc=True),
Pin(num='15',name='TCBL',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='CEN',do_erc=True),
Pin(num='17',name='U',do_erc=True),
Pin(num='27',name='HWCK1',do_erc=True),
Pin(num='18',name='V',do_erc=True),
Pin(num='28',name='ORIG',do_erc=True),
Pin(num='19',name='~AUDIO',func=Pin.OUTPUT,do_erc=True),
Pin(num='1',name='SDA/CDOUT',do_erc=True),
Pin(num='2',name='AD0/CS',do_erc=True),
Pin(num='3',name='AD2',do_erc=True),
Pin(num='4',name='RXP',do_erc=True),
Pin(num='5',name='TEST',do_erc=True),
Pin(num='10',name='TEST',do_erc=True),
Pin(num='20',name='Bit_User',do_erc=True),
Pin(num='11',name='TEST',do_erc=True),
Pin(num='15',name='TCBL',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='TEST',do_erc=True),
Pin(num='17',name='TEST',do_erc=True),
Pin(num='27',name='AD1/CDIN',do_erc=True),
Pin(num='18',name='TEST',do_erc=True),
Pin(num='28',name='SCL/CCLK',do_erc=True),
Pin(num='19',name='INT',func=Pin.OUTPUT,do_erc=True)]),
Part(name='CS8414',dest=TEMPLATE,tool=SKIDL,keywords='digital audio interface receiver spdif',description='96KHz Digital Audio Receiver',ref_prefix='U',num_units=1,do_erc=True,pins=[
Pin(num='1',name='C',func=Pin.OUTPUT,do_erc=True),
Pin(num='2',name='CD/F1',func=Pin.OUTPUT,do_erc=True),
Pin(num='3',name='CC/F0',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='CB/E2',func=Pin.OUTPUT,do_erc=True),
Pin(num='5',name='CA/E1',func=Pin.OUTPUT,do_erc=True),
Pin(num='6',name='C0/E0',func=Pin.OUTPUT,do_erc=True),
Pin(num='7',name='VD',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='DGND',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='RXP',do_erc=True),
Pin(num='10',name='RXN',do_erc=True),
Pin(num='20',name='FILT',do_erc=True),
Pin(num='11',name='FSYNC',func=Pin.BIDIR,do_erc=True),
Pin(num='21',name='AGND',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='SCK',func=Pin.BIDIR,do_erc=True),
Pin(num='22',name='VA',func=Pin.PWRIN,do_erc=True),
Pin(num='13',name='CS12/FCK',do_erc=True),
Pin(num='23',name='M0',do_erc=True),
Pin(num='14',name='U',func=Pin.OUTPUT,do_erc=True),
Pin(num='24',name='M1',do_erc=True),
Pin(num='15',name='CBL',func=Pin.OUTPUT,do_erc=True),
Pin(num='25',name='ERF',func=Pin.OUTPUT,do_erc=True),
Pin(num='16',name='SEL',do_erc=True),
Pin(num='26',name='SDATA',func=Pin.OUTPUT,do_erc=True),
Pin(num='17',name='M3',do_erc=True),
Pin(num='27',name='CE/F2',func=Pin.OUTPUT,do_erc=True),
Pin(num='18',name='M2',do_erc=True),
Pin(num='28',name='VERF',func=Pin.OUTPUT,do_erc=True),
Pin(num='19',name='MCK',func=Pin.OUTPUT,do_erc=True)]),
Part(name='CS8416-N',dest=TEMPLATE,tool=SKIDL,keywords='digital audio interface receiver spdif',description='192 kHz Digital Audio Interface Receiver (QFN-28)',ref_prefix='U',num_units=1,fplist=['QFN*28*'],do_erc=True,pins=[
Pin(num='1',name='RXP0',do_erc=True),
Pin(num='2',name='RXN',do_erc=True),
Pin(num='3',name='VA',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='AGND',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='FILT',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='~RESET',do_erc=True),
Pin(num='7',name='RXP4/RXSEL1',do_erc=True),
Pin(num='8',name='RXP5/RXSEL0',do_erc=True),
Pin(num='9',name='RXP6/TXSEL1',do_erc=True),
Pin(num='10',name='RXP7/TXSEL0',do_erc=True),
Pin(num='20',name='VD',func=Pin.PWRIN,do_erc=True),
Pin(num='11',name='AD0/~CS~/NV/RERR',do_erc=True),
Pin(num='21',name='RMCK',func=Pin.OUTPUT,do_erc=True),
Pin(num='12',name='AD1/CDIN/~AUDIO',do_erc=True),
Pin(num='22',name='OMCK',do_erc=True),
Pin(num='13',name='SCL/CCLK/96KHZ',do_erc=True),
Pin(num='23',name='SDOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='14',name='SDA/CDOUT/RCBL',func=Pin.BIDIR,do_erc=True),
Pin(num='24',name='OSCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='15',name='U/AD2/GPO2',func=Pin.OUTPUT,do_erc=True),
Pin(num='25',name='OLRCK',func=Pin.BIDIR,do_erc=True),
Pin(num='16',name='C/GPO1',func=Pin.OUTPUT,do_erc=True),
Pin(num='26',name='RXP3',do_erc=True),
Pin(num='17',name='TX/GPO0',func=Pin.OUTPUT,do_erc=True),
Pin(num='27',name='RXP2',do_erc=True),
Pin(num='18',name='VL',func=Pin.PWRIN,do_erc=True),
Pin(num='28',name='RXP1',do_erc=True),
Pin(num='19',name='DGND',func=Pin.PWRIN,do_erc=True)]),
Part(name='CS8416-Z',dest=TEMPLATE,tool=SKIDL,keywords='digital audio interface receiver spdif',description='192 kHz Digital Audio Interface Receiver (TSSOP-28)',ref_prefix='U',num_units=1,fplist=['SOIC*28*', '*SSOP*28*'],do_erc=True,aliases=['CS8416-S', 'CS8416'],pins=[
Pin(num='1',name='RXP3',do_erc=True),
Pin(num='2',name='RXP2',do_erc=True),
Pin(num='3',name='RXP1',do_erc=True),
Pin(num='4',name='RXP0',do_erc=True),
Pin(num='5',name='RXN',do_erc=True),
Pin(num='6',name='VA',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='AGND',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='FILT',func=Pin.PASSIVE,do_erc=True),
Pin(num='9',name='~RESET',do_erc=True),
Pin(num='10',name='RXP4/RXSEL1',do_erc=True),
Pin(num='20',name='TX/GPO0',func=Pin.OUTPUT,do_erc=True),
Pin(num='11',name='RXP5/RXSEL0',do_erc=True),
Pin(num='21',name='VL',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='RXP6/TXSEL1',do_erc=True),
Pin(num='22',name='DGND',func=Pin.PWRIN,do_erc=True),
Pin(num='13',name='RXP7/TXSEL0',do_erc=True),
Pin(num='23',name='VD',func=Pin.PWRIN,do_erc=True),
Pin(num='14',name='AD0/~CS~/NV/RERR',do_erc=True),
Pin(num='24',name='RMCK',func=Pin.OUTPUT,do_erc=True),
Pin(num='15',name='AD1/CDIN/~AUDIO',do_erc=True),
Pin(num='25',name='OMCK',do_erc=True),
Pin(num='16',name='SCL/CCLK/96KHZ',do_erc=True),
Pin(num='26',name='SDOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='17',name='SDA/CDOUT/RCBL',func=Pin.BIDIR,do_erc=True),
Pin(num='27',name='OSCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='18',name='U/AD2/GPO2',func=Pin.OUTPUT,do_erc=True),
Pin(num='28',name='OLRCK',func=Pin.BIDIR,do_erc=True),
Pin(num='19',name='C/GPO1',func=Pin.OUTPUT,do_erc=True)]),
Part(name='CS8420_SOFT',dest=TEMPLATE,tool=SKIDL,keywords='digital audio sample rate converter transceiver',description='Digital Audio Sample Rate Converter and Transceiver',ref_prefix='U',num_units=1,do_erc=True,pins=[
Pin(num='1',name='SDA/CDOUT',func=Pin.BIDIR,do_erc=True),
Pin(num='2',name='AD0/CS-',do_erc=True),
Pin(num='3',name='EMPH-/AD2',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='RXP',do_erc=True),
Pin(num='5',name='RXN',do_erc=True),
Pin(num='6',name='VA',do_erc=True),
Pin(num='7',name='AGND',do_erc=True),
Pin(num='8',name='FILT',do_erc=True),
Pin(num='9',name='RST',do_erc=True),
Pin(num='10',name='RMCK',func=Pin.BIDIR,do_erc=True),
Pin(num='20',name='U',func=Pin.BIDIR,do_erc=True),
Pin(num='11',name='RERR',func=Pin.OUTPUT,do_erc=True),
Pin(num='21',name='OMCK',do_erc=True),
Pin(num='12',name='ILRCK',func=Pin.BIDIR,do_erc=True),
Pin(num='22',name='DGND',do_erc=True),
Pin(num='13',name='ISCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='23',name='VD',do_erc=True),
Pin(num='14',name='SDIN',do_erc=True),
Pin(num='24',name='H/S-',do_erc=True),
Pin(num='15',name='TCBL',func=Pin.BIDIR,do_erc=True),
Pin(num='25',name='TXN',func=Pin.OUTPUT,do_erc=True),
Pin(num='16',name='OSCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='26',name='TXP',func=Pin.OUTPUT,do_erc=True),
Pin(num='17',name='OLRCK',func=Pin.BIDIR,do_erc=True),
Pin(num='27',name='AD1/CDIN',do_erc=True),
Pin(num='18',name='SDOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='28',name='SCL/CCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='19',name='INT',func=Pin.OPENCOLL,do_erc=True)]),
Part(name='LM4811',dest=TEMPLATE,tool=SKIDL,keywords='headphone amplifier digital volume',description='Dual105mW Headphone Amplifier, Digital Volume Control, Shutdown Mode',ref_prefix='U',num_units=1,fplist=['VSSOP*', 'WSON*', 'SON*'],do_erc=True,pins=[
Pin(num='1',name='VOUT1',func=Pin.OUTPUT,do_erc=True),
Pin(num='2',name='VIN1',do_erc=True),
Pin(num='3',name='BYPASS',func=Pin.PASSIVE,do_erc=True),
Pin(num='4',name='CLOCK',do_erc=True),
Pin(num='5',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='UP/DN',do_erc=True),
Pin(num='7',name='SHDN',do_erc=True),
Pin(num='8',name='VIN2',do_erc=True),
Pin(num='9',name='VOUT2',func=Pin.OUTPUT,do_erc=True),
Pin(num='10',name='VDD',func=Pin.PWRIN,do_erc=True)]),
Part(name='TLV320AIC23BPW',dest=TEMPLATE,tool=SKIDL,keywords='Stero Audio CODEC 96kHz Headphone',description='8-96kHz Stero Audio CODEC w/ Headphone Amp, TSSOP28',ref_prefix='U',num_units=1,fplist=['TSSOP*'],do_erc=True,pins=[
Pin(num='1',name='BVDD',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='CLKOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='3',name='BCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='4',name='DIN',do_erc=True),
Pin(num='5',name='LRCIN',do_erc=True),
Pin(num='6',name='DOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='7',name='LRCOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='8',name='HPVDD',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='LHPOUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='10',name='RHPOUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='20',name='LLINEIN',func=Pin.PASSIVE,do_erc=True),
Pin(num='11',name='HPGND',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='~CS~',do_erc=True),
Pin(num='12',name='LOUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='22',name='MODE',do_erc=True),
Pin(num='13',name='ROUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='23',name='SDIN',do_erc=True),
Pin(num='14',name='AVDD',func=Pin.PWRIN,do_erc=True),
Pin(num='24',name='SCLK',do_erc=True),
Pin(num='15',name='AGND',func=Pin.PWRIN,do_erc=True),
Pin(num='25',name='XTI/MCK',func=Pin.PASSIVE,do_erc=True),
Pin(num='16',name='VMID',func=Pin.PASSIVE,do_erc=True),
Pin(num='26',name='XTO',func=Pin.PASSIVE,do_erc=True),
Pin(num='17',name='MICBIAS',func=Pin.PASSIVE,do_erc=True),
Pin(num='27',name='DVDD',func=Pin.PWRIN,do_erc=True),
Pin(num='18',name='MICIN',func=Pin.PASSIVE,do_erc=True),
Pin(num='28',name='DGND',func=Pin.PWRIN,do_erc=True),
Pin(num='19',name='RLINEIN',func=Pin.PASSIVE,do_erc=True)]),
Part(name='TLV320AIC23BRHD',dest=TEMPLATE,tool=SKIDL,keywords='Stero Audio CODEC 96kHz Headphone',description='8-96kHz Stero Audio CODEC w/ Headphone Amp, QFN28',ref_prefix='U',num_units=1,fplist=['QFN*'],do_erc=True,pins=[
Pin(num='1',name='DIN',do_erc=True),
Pin(num='2',name='LRCIN',do_erc=True),
Pin(num='3',name='DOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='LRCOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='5',name='HPVDD',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='LHPOUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='7',name='RHPOUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='8',name='HPGND',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='LOUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='10',name='ROUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='20',name='SDIN',do_erc=True),
Pin(num='11',name='AVDD',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='SCLK',do_erc=True),
Pin(num='12',name='AGND',func=Pin.PWRIN,do_erc=True),
Pin(num='22',name='XTI/MCK',func=Pin.PASSIVE,do_erc=True),
Pin(num='13',name='VMID',func=Pin.PASSIVE,do_erc=True),
Pin(num='23',name='XTO',func=Pin.PASSIVE,do_erc=True),
Pin(num='14',name='MICBIAS',func=Pin.PASSIVE,do_erc=True),
Pin(num='24',name='DVDD',func=Pin.PWRIN,do_erc=True),
Pin(num='15',name='MICIN',func=Pin.PASSIVE,do_erc=True),
Pin(num='25',name='DGND',func=Pin.PWRIN,do_erc=True),
Pin(num='16',name='RLINEIN',func=Pin.PASSIVE,do_erc=True),
Pin(num='26',name='BVDD',func=Pin.PWRIN,do_erc=True),
Pin(num='17',name='LLINEIN',func=Pin.PASSIVE,do_erc=True),
Pin(num='27',name='CLKOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='18',name='~CS~',do_erc=True),
Pin(num='28',name='BCLK',func=Pin.BIDIR,do_erc=True),
Pin(num='19',name='MODE',do_erc=True)]),
Part(name='TPA5050',dest=TEMPLATE,tool=SKIDL,keywords='AUDIO',description='Stereo Digital Audio Delay Processor With I2C Control',ref_prefix='U',num_units=1,do_erc=True,pins=[
Pin(num='1',name='LRCLK',do_erc=True),
Pin(num='2',name='DATA',do_erc=True),
Pin(num='3',name='SCL',do_erc=True),
Pin(num='4',name='SDA',func=Pin.BIDIR,do_erc=True),
Pin(num='5',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='7',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='10',name='ADD0',do_erc=True),
Pin(num='11',name='ADD1',do_erc=True),
Pin(num='12',name='ADD2',do_erc=True),
Pin(num='13',name='VDD',func=Pin.PWRIN,do_erc=True),
Pin(num='14',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='15',name='DATA_OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='16',name='BCLK',do_erc=True)])])
| 65.290323
| 279
| 0.575381
| 4,597
| 28,336
| 3.444855
| 0.078965
| 0.135135
| 0.243243
| 0.301591
| 0.919045
| 0.895491
| 0.892587
| 0.741791
| 0.649533
| 0.634819
| 0
| 0.043797
| 0.200663
| 28,336
| 433
| 280
| 65.441109
| 0.655364
| 0
| 0
| 0.25522
| 0
| 0
| 0.146633
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.058005
| 0.00232
| 0
| 0.00232
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
41ed60e84a632411c07868017be83cda11679005
| 66,870
|
py
|
Python
|
silver/migrations/0001_initial.py
|
atkinsond/silver
|
7e88db324ea7380dbc1b03cf18911a614a51e2b3
|
[
"Apache-2.0"
] | null | null | null |
silver/migrations/0001_initial.py
|
atkinsond/silver
|
7e88db324ea7380dbc1b03cf18911a614a51e2b3
|
[
"Apache-2.0"
] | null | null | null |
silver/migrations/0001_initial.py
|
atkinsond/silver
|
7e88db324ea7380dbc1b03cf18911a614a51e2b3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-18 06:25
from __future__ import unicode_literals
import annoying.fields
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_fsm
import json
import livefield.fields
import silver.models.documents.base
import silver.models.documents.pdf
import silver.utils.models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BillingDocumentBase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('kind', models.CharField(db_index=True, max_length=8, verbose_name=silver.models.documents.base.get_billing_documents_kinds)),
('series', models.CharField(blank=True, db_index=True, max_length=20, null=True)),
('number', models.IntegerField(blank=True, db_index=True, null=True)),
('archived_customer', annoying.fields.JSONField(blank=True, default=dict, deserializer=json.loads, null=True, serializer=annoying.fields.dumps)),
('archived_provider', annoying.fields.JSONField(blank=True, default=dict, deserializer=json.loads, null=True, serializer=annoying.fields.dumps)),
('due_date', models.DateField(blank=True, null=True)),
('issue_date', models.DateField(blank=True, db_index=True, null=True)),
('paid_date', models.DateField(blank=True, null=True)),
('cancel_date', models.DateField(blank=True, null=True)),
('sales_tax_percent', models.DecimalField(blank=True, decimal_places=2, max_digits=4, null=True, validators=[django.core.validators.MinValueValidator(0.0)])),
('sales_tax_name', models.CharField(blank=True, max_length=64, null=True)),
('currency', models.CharField(choices=[('AED', 'AED (UAE Dirham)'), ('AFN', 'AFN (Afghani)'), ('ALL', 'ALL (Lek)'), ('AMD', 'AMD (Armenian Dram)'), ('ANG', 'ANG (Netherlands Antillean Guilder)'), ('AOA', 'AOA (Kwanza)'), ('ARS', 'ARS (Argentine Peso)'), ('AUD', 'AUD (Australian Dollar)'), ('AWG', 'AWG (Aruban Florin)'), ('AZN', 'AZN (Azerbaijanian Manat)'), ('BAM', 'BAM (Convertible Mark)'), ('BBD', 'BBD (Barbados Dollar)'), ('BDT', 'BDT (Taka)'), ('BGN', 'BGN (Bulgarian Lev)'), ('BHD', 'BHD (Bahraini Dinar)'), ('BIF', 'BIF (Burundi Franc)'), ('BMD', 'BMD (Bermudian Dollar)'), ('BND', 'BND (Brunei Dollar)'), ('BOB', 'BOB (Boliviano)'), ('BRL', 'BRL (Brazilian Real)'), ('BSD', 'BSD (Bahamian Dollar)'), ('BTN', 'BTN (Ngultrum)'), ('BWP', 'BWP (Pula)'), ('BYN', 'BYN (Belarusian Ruble)'), ('BZD', 'BZD (Belize Dollar)'), ('CAD', 'CAD (Canadian Dollar)'), ('CDF', 'CDF (Congolese Franc)'), ('CHF', 'CHF (Swiss Franc)'), ('CLP', 'CLP (Chilean Peso)'), ('CNY', 'CNY (Yuan Renminbi)'), ('COP', 'COP (Colombian Peso)'), ('CRC', 'CRC (Costa Rican Colon)'), ('CUC', 'CUC (Peso Convertible)'), ('CUP', 'CUP (Cuban Peso)'), ('CVE', 'CVE (Cabo Verde Escudo)'), ('CZK', 'CZK (Czech Koruna)'), ('DJF', 'DJF (Djibouti Franc)'), ('DKK', 'DKK (Danish Krone)'), ('DOP', 'DOP (Dominican Peso)'), ('DZD', 'DZD (Algerian Dinar)'), ('EGP', 'EGP (Egyptian Pound)'), ('ERN', 'ERN (Nakfa)'), ('ETB', 'ETB (Ethiopian Birr)'), ('EUR', 'EUR (Euro)'), ('FJD', 'FJD (Fiji Dollar)'), ('FKP', 'FKP (Falkland Islands Pound)'), ('GBP', 'GBP (Pound Sterling)'), ('GEL', 'GEL (Lari)'), ('GHS', 'GHS (Ghana Cedi)'), ('GIP', 'GIP (Gibraltar Pound)'), ('GMD', 'GMD (Dalasi)'), ('GNF', 'GNF (Guinea Franc)'), ('GTQ', 'GTQ (Quetzal)'), ('GYD', 'GYD (Guyana Dollar)'), ('HKD', 'HKD (Hong Kong Dollar)'), ('HNL', 'HNL (Lempira)'), ('HRK', 'HRK (Kuna)'), ('HTG', 'HTG (Gourde)'), ('HUF', 'HUF (Forint)'), ('IDR', 'IDR (Rupiah)'), ('ILS', 'ILS (New Israeli Sheqel)'), ('INR', 'INR (Indian Rupee)'), ('IQD', 'IQD (Iraqi Dinar)'), 
('IRR', 'IRR (Iranian Rial)'), ('ISK', 'ISK (Iceland Krona)'), ('JMD', 'JMD (Jamaican Dollar)'), ('JOD', 'JOD (Jordanian Dinar)'), ('JPY', 'JPY (Yen)'), ('KES', 'KES (Kenyan Shilling)'), ('KGS', 'KGS (Som)'), ('KHR', 'KHR (Riel)'), ('KMF', 'KMF (Comoro Franc)'), ('KPW', 'KPW (North Korean Won)'), ('KRW', 'KRW (Won)'), ('KWD', 'KWD (Kuwaiti Dinar)'), ('KYD', 'KYD (Cayman Islands Dollar)'), ('KZT', 'KZT (Tenge)'), ('LAK', 'LAK (Kip)'), ('LBP', 'LBP (Lebanese Pound)'), ('LKR', 'LKR (Sri Lanka Rupee)'), ('LRD', 'LRD (Liberian Dollar)'), ('LSL', 'LSL (Loti)'), ('LYD', 'LYD (Libyan Dinar)'), ('MAD', 'MAD (Moroccan Dirham)'), ('MDL', 'MDL (Moldovan Leu)'), ('MGA', 'MGA (Malagasy Ariary)'), ('MKD', 'MKD (Denar)'), ('MMK', 'MMK (Kyat)'), ('MNT', 'MNT (Tugrik)'), ('MOP', 'MOP (Pataca)'), ('MRO', 'MRO (Ouguiya)'), ('MUR', 'MUR (Mauritius Rupee)'), ('MVR', 'MVR (Rufiyaa)'), ('MWK', 'MWK (Malawi Kwacha)'), ('MXN', 'MXN (Mexican Peso)'), ('MYR', 'MYR (Malaysian Ringgit)'), ('MZN', 'MZN (Mozambique Metical)'), ('NAD', 'NAD (Namibia Dollar)'), ('NGN', 'NGN (Naira)'), ('NIO', 'NIO (Cordoba Oro)'), ('NOK', 'NOK (Norwegian Krone)'), ('NPR', 'NPR (Nepalese Rupee)'), ('NZD', 'NZD (New Zealand Dollar)'), ('OMR', 'OMR (Rial Omani)'), ('PAB', 'PAB (Balboa)'), ('PEN', 'PEN (Sol)'), ('PGK', 'PGK (Kina)'), ('PHP', 'PHP (Philippine Peso)'), ('PKR', 'PKR (Pakistan Rupee)'), ('PLN', 'PLN (Zloty)'), ('PYG', 'PYG (Guarani)'), ('QAR', 'QAR (Qatari Rial)'), ('RON', 'RON (Romanian Leu)'), ('RSD', 'RSD (Serbian Dinar)'), ('RUB', 'RUB (Russian Ruble)'), ('RWF', 'RWF (Rwanda Franc)'), ('SAR', 'SAR (Saudi Riyal)'), ('SBD', 'SBD (Solomon Islands Dollar)'), ('SCR', 'SCR (Seychelles Rupee)'), ('SDG', 'SDG (Sudanese Pound)'), ('SEK', 'SEK (Swedish Krona)'), ('SGD', 'SGD (Singapore Dollar)'), ('SHP', 'SHP (Saint Helena Pound)'), ('SLL', 'SLL (Leone)'), ('SOS', 'SOS (Somali Shilling)'), ('SRD', 'SRD (Surinam Dollar)'), ('SSP', 'SSP (South Sudanese Pound)'), ('STD', 'STD (Dobra)'), ('SVC', 'SVC (El Salvador 
Colon)'), ('SYP', 'SYP (Syrian Pound)'), ('SZL', 'SZL (Lilangeni)'), ('THB', 'THB (Baht)'), ('TJS', 'TJS (Somoni)'), ('TMT', 'TMT (Turkmenistan New Manat)'), ('TND', 'TND (Tunisian Dinar)'), ('TOP', 'TOP (Pa’anga)'), ('TRY', 'TRY (Turkish Lira)'), ('TTD', 'TTD (Trinidad and Tobago Dollar)'), ('TWD', 'TWD (New Taiwan Dollar)'), ('TZS', 'TZS (Tanzanian Shilling)'), ('UAH', 'UAH (Hryvnia)'), ('UGX', 'UGX (Uganda Shilling)'), ('USD', 'USD (US Dollar)'), ('UYU', 'UYU (Peso Uruguayo)'), ('UZS', 'UZS (Uzbekistan Sum)'), ('VEF', 'VEF (Bolívar)'), ('VND', 'VND (Dong)'), ('VUV', 'VUV (Vatu)'), ('WST', 'WST (Tala)'), ('XAF', 'XAF (CFA Franc BEAC)'), ('XAG', 'XAG (Silver)'), ('XAU', 'XAU (Gold)'), ('XBA', 'XBA (Bond Markets Unit European Composite Unit (EURCO))'), ('XBB', 'XBB (Bond Markets Unit European Monetary Unit (E.M.U.-6))'), ('XBC', 'XBC (Bond Markets Unit European Unit of Account 9 (E.U.A.-9))'), ('XBD', 'XBD (Bond Markets Unit European Unit of Account 17 (E.U.A.-17))'), ('XCD', 'XCD (East Caribbean Dollar)'), ('XDR', 'XDR (SDR (Special Drawing Right))'), ('XOF', 'XOF (CFA Franc BCEAO)'), ('XPD', 'XPD (Palladium)'), ('XPF', 'XPF (CFP Franc)'), ('XPT', 'XPT (Platinum)'), ('XSU', 'XSU (Sucre)'), ('XTS', 'XTS (Codes specifically reserved for testing purposes)'), ('XUA', 'XUA (ADB Unit of Account)'), ('XXX', 'XXX (The codes assigned for transactions where no currency is involved)'), ('YER', 'YER (Yemeni Rial)'), ('ZAR', 'ZAR (Rand)'), ('ZMW', 'ZMW (Zambian Kwacha)'), ('ZWL', 'ZWL (Zimbabwe Dollar)')], default='USD', help_text='The currency used for billing.', max_length=4)),
('transaction_currency', models.CharField(choices=[('AED', 'AED (UAE Dirham)'), ('AFN', 'AFN (Afghani)'), ('ALL', 'ALL (Lek)'), ('AMD', 'AMD (Armenian Dram)'), ('ANG', 'ANG (Netherlands Antillean Guilder)'), ('AOA', 'AOA (Kwanza)'), ('ARS', 'ARS (Argentine Peso)'), ('AUD', 'AUD (Australian Dollar)'), ('AWG', 'AWG (Aruban Florin)'), ('AZN', 'AZN (Azerbaijanian Manat)'), ('BAM', 'BAM (Convertible Mark)'), ('BBD', 'BBD (Barbados Dollar)'), ('BDT', 'BDT (Taka)'), ('BGN', 'BGN (Bulgarian Lev)'), ('BHD', 'BHD (Bahraini Dinar)'), ('BIF', 'BIF (Burundi Franc)'), ('BMD', 'BMD (Bermudian Dollar)'), ('BND', 'BND (Brunei Dollar)'), ('BOB', 'BOB (Boliviano)'), ('BRL', 'BRL (Brazilian Real)'), ('BSD', 'BSD (Bahamian Dollar)'), ('BTN', 'BTN (Ngultrum)'), ('BWP', 'BWP (Pula)'), ('BYN', 'BYN (Belarusian Ruble)'), ('BZD', 'BZD (Belize Dollar)'), ('CAD', 'CAD (Canadian Dollar)'), ('CDF', 'CDF (Congolese Franc)'), ('CHF', 'CHF (Swiss Franc)'), ('CLP', 'CLP (Chilean Peso)'), ('CNY', 'CNY (Yuan Renminbi)'), ('COP', 'COP (Colombian Peso)'), ('CRC', 'CRC (Costa Rican Colon)'), ('CUC', 'CUC (Peso Convertible)'), ('CUP', 'CUP (Cuban Peso)'), ('CVE', 'CVE (Cabo Verde Escudo)'), ('CZK', 'CZK (Czech Koruna)'), ('DJF', 'DJF (Djibouti Franc)'), ('DKK', 'DKK (Danish Krone)'), ('DOP', 'DOP (Dominican Peso)'), ('DZD', 'DZD (Algerian Dinar)'), ('EGP', 'EGP (Egyptian Pound)'), ('ERN', 'ERN (Nakfa)'), ('ETB', 'ETB (Ethiopian Birr)'), ('EUR', 'EUR (Euro)'), ('FJD', 'FJD (Fiji Dollar)'), ('FKP', 'FKP (Falkland Islands Pound)'), ('GBP', 'GBP (Pound Sterling)'), ('GEL', 'GEL (Lari)'), ('GHS', 'GHS (Ghana Cedi)'), ('GIP', 'GIP (Gibraltar Pound)'), ('GMD', 'GMD (Dalasi)'), ('GNF', 'GNF (Guinea Franc)'), ('GTQ', 'GTQ (Quetzal)'), ('GYD', 'GYD (Guyana Dollar)'), ('HKD', 'HKD (Hong Kong Dollar)'), ('HNL', 'HNL (Lempira)'), ('HRK', 'HRK (Kuna)'), ('HTG', 'HTG (Gourde)'), ('HUF', 'HUF (Forint)'), ('IDR', 'IDR (Rupiah)'), ('ILS', 'ILS (New Israeli Sheqel)'), ('INR', 'INR (Indian Rupee)'), ('IQD', 'IQD (Iraqi 
Dinar)'), ('IRR', 'IRR (Iranian Rial)'), ('ISK', 'ISK (Iceland Krona)'), ('JMD', 'JMD (Jamaican Dollar)'), ('JOD', 'JOD (Jordanian Dinar)'), ('JPY', 'JPY (Yen)'), ('KES', 'KES (Kenyan Shilling)'), ('KGS', 'KGS (Som)'), ('KHR', 'KHR (Riel)'), ('KMF', 'KMF (Comoro Franc)'), ('KPW', 'KPW (North Korean Won)'), ('KRW', 'KRW (Won)'), ('KWD', 'KWD (Kuwaiti Dinar)'), ('KYD', 'KYD (Cayman Islands Dollar)'), ('KZT', 'KZT (Tenge)'), ('LAK', 'LAK (Kip)'), ('LBP', 'LBP (Lebanese Pound)'), ('LKR', 'LKR (Sri Lanka Rupee)'), ('LRD', 'LRD (Liberian Dollar)'), ('LSL', 'LSL (Loti)'), ('LYD', 'LYD (Libyan Dinar)'), ('MAD', 'MAD (Moroccan Dirham)'), ('MDL', 'MDL (Moldovan Leu)'), ('MGA', 'MGA (Malagasy Ariary)'), ('MKD', 'MKD (Denar)'), ('MMK', 'MMK (Kyat)'), ('MNT', 'MNT (Tugrik)'), ('MOP', 'MOP (Pataca)'), ('MRO', 'MRO (Ouguiya)'), ('MUR', 'MUR (Mauritius Rupee)'), ('MVR', 'MVR (Rufiyaa)'), ('MWK', 'MWK (Malawi Kwacha)'), ('MXN', 'MXN (Mexican Peso)'), ('MYR', 'MYR (Malaysian Ringgit)'), ('MZN', 'MZN (Mozambique Metical)'), ('NAD', 'NAD (Namibia Dollar)'), ('NGN', 'NGN (Naira)'), ('NIO', 'NIO (Cordoba Oro)'), ('NOK', 'NOK (Norwegian Krone)'), ('NPR', 'NPR (Nepalese Rupee)'), ('NZD', 'NZD (New Zealand Dollar)'), ('OMR', 'OMR (Rial Omani)'), ('PAB', 'PAB (Balboa)'), ('PEN', 'PEN (Sol)'), ('PGK', 'PGK (Kina)'), ('PHP', 'PHP (Philippine Peso)'), ('PKR', 'PKR (Pakistan Rupee)'), ('PLN', 'PLN (Zloty)'), ('PYG', 'PYG (Guarani)'), ('QAR', 'QAR (Qatari Rial)'), ('RON', 'RON (Romanian Leu)'), ('RSD', 'RSD (Serbian Dinar)'), ('RUB', 'RUB (Russian Ruble)'), ('RWF', 'RWF (Rwanda Franc)'), ('SAR', 'SAR (Saudi Riyal)'), ('SBD', 'SBD (Solomon Islands Dollar)'), ('SCR', 'SCR (Seychelles Rupee)'), ('SDG', 'SDG (Sudanese Pound)'), ('SEK', 'SEK (Swedish Krona)'), ('SGD', 'SGD (Singapore Dollar)'), ('SHP', 'SHP (Saint Helena Pound)'), ('SLL', 'SLL (Leone)'), ('SOS', 'SOS (Somali Shilling)'), ('SRD', 'SRD (Surinam Dollar)'), ('SSP', 'SSP (South Sudanese Pound)'), ('STD', 'STD (Dobra)'), ('SVC', 'SVC (El 
Salvador Colon)'), ('SYP', 'SYP (Syrian Pound)'), ('SZL', 'SZL (Lilangeni)'), ('THB', 'THB (Baht)'), ('TJS', 'TJS (Somoni)'), ('TMT', 'TMT (Turkmenistan New Manat)'), ('TND', 'TND (Tunisian Dinar)'), ('TOP', 'TOP (Pa’anga)'), ('TRY', 'TRY (Turkish Lira)'), ('TTD', 'TTD (Trinidad and Tobago Dollar)'), ('TWD', 'TWD (New Taiwan Dollar)'), ('TZS', 'TZS (Tanzanian Shilling)'), ('UAH', 'UAH (Hryvnia)'), ('UGX', 'UGX (Uganda Shilling)'), ('USD', 'USD (US Dollar)'), ('UYU', 'UYU (Peso Uruguayo)'), ('UZS', 'UZS (Uzbekistan Sum)'), ('VEF', 'VEF (Bolívar)'), ('VND', 'VND (Dong)'), ('VUV', 'VUV (Vatu)'), ('WST', 'WST (Tala)'), ('XAF', 'XAF (CFA Franc BEAC)'), ('XAG', 'XAG (Silver)'), ('XAU', 'XAU (Gold)'), ('XBA', 'XBA (Bond Markets Unit European Composite Unit (EURCO))'), ('XBB', 'XBB (Bond Markets Unit European Monetary Unit (E.M.U.-6))'), ('XBC', 'XBC (Bond Markets Unit European Unit of Account 9 (E.U.A.-9))'), ('XBD', 'XBD (Bond Markets Unit European Unit of Account 17 (E.U.A.-17))'), ('XCD', 'XCD (East Caribbean Dollar)'), ('XDR', 'XDR (SDR (Special Drawing Right))'), ('XOF', 'XOF (CFA Franc BCEAO)'), ('XPD', 'XPD (Palladium)'), ('XPF', 'XPF (CFP Franc)'), ('XPT', 'XPT (Platinum)'), ('XSU', 'XSU (Sucre)'), ('XTS', 'XTS (Codes specifically reserved for testing purposes)'), ('XUA', 'XUA (ADB Unit of Account)'), ('XXX', 'XXX (The codes assigned for transactions where no currency is involved)'), ('YER', 'YER (Yemeni Rial)'), ('ZAR', 'ZAR (Rand)'), ('ZMW', 'ZMW (Zambian Kwacha)'), ('ZWL', 'ZWL (Zimbabwe Dollar)')], help_text='The currency used when making a transaction.', max_length=4)),
('transaction_xe_rate', models.DecimalField(blank=True, decimal_places=4, help_text='Currency exchange rate from document currency to transaction_currency.', max_digits=16, null=True)),
('transaction_xe_date', models.DateField(blank=True, help_text='Date of the transaction exchange rate.', null=True)),
('state', django_fsm.FSMField(choices=[('draft', 'Draft'), ('issued', 'Issued'), ('paid', 'Paid'), ('canceled', 'Canceled')], default='draft', help_text='The state the invoice is in.', max_length=10, verbose_name='State')),
('_total', models.DecimalField(blank=True, decimal_places=2, max_digits=19, null=True)),
('_total_in_transaction_currency', models.DecimalField(blank=True, decimal_places=2, max_digits=19, null=True)),
],
options={
'ordering': ('-issue_date', 'series', '-number'),
},
),
migrations.CreateModel(
name='BillingLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('billing_date', models.DateField(help_text='The date when the invoice/proforma was issued.')),
('plan_billed_up_to', models.DateField(help_text='The date up to which the plan base amount has been billed.')),
('metered_features_billed_up_to', models.DateField(help_text='The date up to which the metered features have been billed.')),
('total', models.DecimalField(blank=True, decimal_places=2, max_digits=12, null=True)),
('plan_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=12, null=True)),
('metered_features_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=12, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('invoice', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invoice_billing_logs', to='silver.BillingDocumentBase')),
('proforma', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='proforma_billing_logs', to='silver.BillingDocumentBase')),
],
options={
'ordering': ['-billing_date'],
},
),
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('live', livefield.fields.LiveField(default=True)),
('company', models.CharField(blank=True, max_length=128, null=True)),
('address_1', models.CharField(max_length=128)),
('address_2', models.CharField(blank=True, max_length=128, null=True)),
('country', models.CharField(choices=[('AD', 'Andorra'), ('AE', 'United Arab Emirates'), ('AF', 'Afghanistan'), ('AG', 'Antigua and Barbuda'), ('AI', 'Anguilla'), ('AL', 'Albania'), ('AM', 'Armenia'), ('AO', 'Angola'), ('AQ', 'Antarctica'), ('AR', 'Argentina'), ('AS', 'American Samoa'), ('AT', 'Austria'), ('AU', 'Australia'), ('AW', 'Aruba'), ('AX', 'Åland Islands'), ('AZ', 'Azerbaijan'), ('BA', 'Bosnia and Herzegovina'), ('BB', 'Barbados'), ('BD', 'Bangladesh'), ('BE', 'Belgium'), ('BF', 'Burkina Faso'), ('BG', 'Bulgaria'), ('BH', 'Bahrain'), ('BI', 'Burundi'), ('BJ', 'Benin'), ('BL', 'Saint Barthélemy'), ('BM', 'Bermuda'), ('BN', 'Brunei Darussalam'), ('BO', 'Bolivia, Plurinational State of'), ('BQ', 'Bonaire, Sint Eustatius and Saba'), ('BR', 'Brazil'), ('BS', 'Bahamas'), ('BT', 'Bhutan'), ('BV', 'Bouvet Island'), ('BW', 'Botswana'), ('BY', 'Belarus'), ('BZ', 'Belize'), ('CA', 'Canada'), ('CC', 'Cocos (Keeling) Islands'), ('CD', 'Congo, The Democratic Republic of the'), ('CF', 'Central African Republic'), ('CG', 'Congo'), ('CH', 'Switzerland'), ('CI', "Côte d'Ivoire"), ('CK', 'Cook Islands'), ('CL', 'Chile'), ('CM', 'Cameroon'), ('CN', 'China'), ('CO', 'Colombia'), ('CR', 'Costa Rica'), ('CU', 'Cuba'), ('CV', 'Cabo Verde'), ('CW', 'Curaçao'), ('CX', 'Christmas Island'), ('CY', 'Cyprus'), ('CZ', 'Czechia'), ('DE', 'Germany'), ('DJ', 'Djibouti'), ('DK', 'Denmark'), ('DM', 'Dominica'), ('DO', 'Dominican Republic'), ('DZ', 'Algeria'), ('EC', 'Ecuador'), ('EE', 'Estonia'), ('EG', 'Egypt'), ('EH', 'Western Sahara'), ('ER', 'Eritrea'), ('ES', 'Spain'), ('ET', 'Ethiopia'), ('FI', 'Finland'), ('FJ', 'Fiji'), ('FK', 'Falkland Islands (Malvinas)'), ('FM', 'Micronesia, Federated States of'), ('FO', 'Faroe Islands'), ('FR', 'France'), ('GA', 'Gabon'), ('GB', 'United Kingdom'), ('GD', 'Grenada'), ('GE', 'Georgia'), ('GF', 'French Guiana'), ('GG', 'Guernsey'), ('GH', 'Ghana'), ('GI', 'Gibraltar'), ('GL', 'Greenland'), ('GM', 'Gambia'), ('GN', 'Guinea'), ('GP', 'Guadeloupe'), 
('GQ', 'Equatorial Guinea'), ('GR', 'Greece'), ('GS', 'South Georgia and the South Sandwich Islands'), ('GT', 'Guatemala'), ('GU', 'Guam'), ('GW', 'Guinea-Bissau'), ('GY', 'Guyana'), ('HK', 'Hong Kong'), ('HM', 'Heard Island and McDonald Islands'), ('HN', 'Honduras'), ('HR', 'Croatia'), ('HT', 'Haiti'), ('HU', 'Hungary'), ('ID', 'Indonesia'), ('IE', 'Ireland'), ('IL', 'Israel'), ('IM', 'Isle of Man'), ('IN', 'India'), ('IO', 'British Indian Ocean Territory'), ('IQ', 'Iraq'), ('IR', 'Iran, Islamic Republic of'), ('IS', 'Iceland'), ('IT', 'Italy'), ('JE', 'Jersey'), ('JM', 'Jamaica'), ('JO', 'Jordan'), ('JP', 'Japan'), ('KE', 'Kenya'), ('KG', 'Kyrgyzstan'), ('KH', 'Cambodia'), ('KI', 'Kiribati'), ('KM', 'Comoros'), ('KN', 'Saint Kitts and Nevis'), ('KP', "Korea, Democratic People's Republic of"), ('KR', 'Korea, Republic of'), ('KW', 'Kuwait'), ('KY', 'Cayman Islands'), ('KZ', 'Kazakhstan'), ('LA', "Lao People's Democratic Republic"), ('LB', 'Lebanon'), ('LC', 'Saint Lucia'), ('LI', 'Liechtenstein'), ('LK', 'Sri Lanka'), ('LR', 'Liberia'), ('LS', 'Lesotho'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('LV', 'Latvia'), ('LY', 'Libya'), ('MA', 'Morocco'), ('MC', 'Monaco'), ('MD', 'Moldova, Republic of'), ('ME', 'Montenegro'), ('MF', 'Saint Martin (French part)'), ('MG', 'Madagascar'), ('MH', 'Marshall Islands'), ('MK', 'Macedonia, Republic of'), ('ML', 'Mali'), ('MM', 'Myanmar'), ('MN', 'Mongolia'), ('MO', 'Macao'), ('MP', 'Northern Mariana Islands'), ('MQ', 'Martinique'), ('MR', 'Mauritania'), ('MS', 'Montserrat'), ('MT', 'Malta'), ('MU', 'Mauritius'), ('MV', 'Maldives'), ('MW', 'Malawi'), ('MX', 'Mexico'), ('MY', 'Malaysia'), ('MZ', 'Mozambique'), ('NA', 'Namibia'), ('NC', 'New Caledonia'), ('NE', 'Niger'), ('NF', 'Norfolk Island'), ('NG', 'Nigeria'), ('NI', 'Nicaragua'), ('NL', 'Netherlands'), ('NO', 'Norway'), ('NP', 'Nepal'), ('NR', 'Nauru'), ('NU', 'Niue'), ('NZ', 'New Zealand'), ('OM', 'Oman'), ('PA', 'Panama'), ('PE', 'Peru'), ('PF', 'French Polynesia'), ('PG', 
'Papua New Guinea'), ('PH', 'Philippines'), ('PK', 'Pakistan'), ('PL', 'Poland'), ('PM', 'Saint Pierre and Miquelon'), ('PN', 'Pitcairn'), ('PR', 'Puerto Rico'), ('PS', 'Palestine, State of'), ('PT', 'Portugal'), ('PW', 'Palau'), ('PY', 'Paraguay'), ('QA', 'Qatar'), ('RE', 'Réunion'), ('RO', 'Romania'), ('RS', 'Serbia'), ('RU', 'Russian Federation'), ('RW', 'Rwanda'), ('SA', 'Saudi Arabia'), ('SB', 'Solomon Islands'), ('SC', 'Seychelles'), ('SD', 'Sudan'), ('SE', 'Sweden'), ('SG', 'Singapore'), ('SH', 'Saint Helena, Ascension and Tristan da Cunha'), ('SI', 'Slovenia'), ('SJ', 'Svalbard and Jan Mayen'), ('SK', 'Slovakia'), ('SL', 'Sierra Leone'), ('SM', 'San Marino'), ('SN', 'Senegal'), ('SO', 'Somalia'), ('SR', 'Suriname'), ('SS', 'South Sudan'), ('ST', 'Sao Tome and Principe'), ('SV', 'El Salvador'), ('SX', 'Sint Maarten (Dutch part)'), ('SY', 'Syrian Arab Republic'), ('SZ', 'Swaziland'), ('TC', 'Turks and Caicos Islands'), ('TD', 'Chad'), ('TF', 'French Southern Territories'), ('TG', 'Togo'), ('TH', 'Thailand'), ('TJ', 'Tajikistan'), ('TK', 'Tokelau'), ('TL', 'Timor-Leste'), ('TM', 'Turkmenistan'), ('TN', 'Tunisia'), ('TO', 'Tonga'), ('TR', 'Turkey'), ('TT', 'Trinidad and Tobago'), ('TV', 'Tuvalu'), ('TW', 'Taiwan, Province of China'), ('TZ', 'Tanzania, United Republic of'), ('UA', 'Ukraine'), ('UG', 'Uganda'), ('UM', 'United States Minor Outlying Islands'), ('US', 'United States'), ('UY', 'Uruguay'), ('UZ', 'Uzbekistan'), ('VA', 'Holy See (Vatican City State)'), ('VC', 'Saint Vincent and the Grenadines'), ('VE', 'Venezuela, Bolivarian Republic of'), ('VG', 'Virgin Islands, British'), ('VI', 'Virgin Islands, U.S.'), ('VN', 'Viet Nam'), ('VU', 'Vanuatu'), ('WF', 'Wallis and Futuna'), ('WS', 'Samoa'), ('YE', 'Yemen'), ('YT', 'Mayotte'), ('ZA', 'South Africa'), ('ZM', 'Zambia'), ('ZW', 'Zimbabwe')], max_length=3)),
('phone', models.CharField(blank=True, max_length=32, null=True)),
('email', models.CharField(blank=True, max_length=254, null=True)),
('city', models.CharField(max_length=128)),
('state', models.CharField(blank=True, max_length=128, null=True)),
('zip_code', models.CharField(blank=True, max_length=32, null=True)),
('extra', models.TextField(blank=True, help_text='Extra information to display on the invoice (markdown formatted).', null=True)),
('meta', annoying.fields.JSONField(blank=True, default={}, deserializer=json.loads, null=True, serializer=annoying.fields.dumps)),
('first_name', models.CharField(help_text="The customer's first name.", max_length=128)),
('last_name', models.CharField(help_text="The customer's last name.", max_length=128)),
('payment_due_days', models.PositiveIntegerField(default=5, help_text='Due days for generated proforma/invoice.')),
('consolidated_billing', models.BooleanField(default=False, help_text='A flag indicating consolidated billing.')),
('customer_reference', models.CharField(blank=True, help_text="It's a reference to be passed between silver and clients. It usually points to an account ID.", max_length=256, null=True, validators=[django.core.validators.RegexValidator(message='Reference must not contain commas.', regex='^[^,]*$')])),
('sales_tax_number', models.CharField(blank=True, max_length=64, null=True)),
('sales_tax_percent', models.DecimalField(blank=True, decimal_places=2, help_text="Whenever to add sales tax. If null, it won't show up on the invoice.", max_digits=4, null=True, validators=[django.core.validators.MinValueValidator(0.0)])),
('sales_tax_name', models.CharField(blank=True, help_text="Sales tax name (eg. 'sales tax' or 'VAT').", max_length=64, null=True)),
('currency', models.CharField(blank=True, choices=[('AED', 'AED (UAE Dirham)'), ('AFN', 'AFN (Afghani)'), ('ALL', 'ALL (Lek)'), ('AMD', 'AMD (Armenian Dram)'), ('ANG', 'ANG (Netherlands Antillean Guilder)'), ('AOA', 'AOA (Kwanza)'), ('ARS', 'ARS (Argentine Peso)'), ('AUD', 'AUD (Australian Dollar)'), ('AWG', 'AWG (Aruban Florin)'), ('AZN', 'AZN (Azerbaijanian Manat)'), ('BAM', 'BAM (Convertible Mark)'), ('BBD', 'BBD (Barbados Dollar)'), ('BDT', 'BDT (Taka)'), ('BGN', 'BGN (Bulgarian Lev)'), ('BHD', 'BHD (Bahraini Dinar)'), ('BIF', 'BIF (Burundi Franc)'), ('BMD', 'BMD (Bermudian Dollar)'), ('BND', 'BND (Brunei Dollar)'), ('BOB', 'BOB (Boliviano)'), ('BRL', 'BRL (Brazilian Real)'), ('BSD', 'BSD (Bahamian Dollar)'), ('BTN', 'BTN (Ngultrum)'), ('BWP', 'BWP (Pula)'), ('BYN', 'BYN (Belarusian Ruble)'), ('BZD', 'BZD (Belize Dollar)'), ('CAD', 'CAD (Canadian Dollar)'), ('CDF', 'CDF (Congolese Franc)'), ('CHF', 'CHF (Swiss Franc)'), ('CLP', 'CLP (Chilean Peso)'), ('CNY', 'CNY (Yuan Renminbi)'), ('COP', 'COP (Colombian Peso)'), ('CRC', 'CRC (Costa Rican Colon)'), ('CUC', 'CUC (Peso Convertible)'), ('CUP', 'CUP (Cuban Peso)'), ('CVE', 'CVE (Cabo Verde Escudo)'), ('CZK', 'CZK (Czech Koruna)'), ('DJF', 'DJF (Djibouti Franc)'), ('DKK', 'DKK (Danish Krone)'), ('DOP', 'DOP (Dominican Peso)'), ('DZD', 'DZD (Algerian Dinar)'), ('EGP', 'EGP (Egyptian Pound)'), ('ERN', 'ERN (Nakfa)'), ('ETB', 'ETB (Ethiopian Birr)'), ('EUR', 'EUR (Euro)'), ('FJD', 'FJD (Fiji Dollar)'), ('FKP', 'FKP (Falkland Islands Pound)'), ('GBP', 'GBP (Pound Sterling)'), ('GEL', 'GEL (Lari)'), ('GHS', 'GHS (Ghana Cedi)'), ('GIP', 'GIP (Gibraltar Pound)'), ('GMD', 'GMD (Dalasi)'), ('GNF', 'GNF (Guinea Franc)'), ('GTQ', 'GTQ (Quetzal)'), ('GYD', 'GYD (Guyana Dollar)'), ('HKD', 'HKD (Hong Kong Dollar)'), ('HNL', 'HNL (Lempira)'), ('HRK', 'HRK (Kuna)'), ('HTG', 'HTG (Gourde)'), ('HUF', 'HUF (Forint)'), ('IDR', 'IDR (Rupiah)'), ('ILS', 'ILS (New Israeli Sheqel)'), ('INR', 'INR (Indian Rupee)'), ('IQD', 'IQD (Iraqi 
Dinar)'), ('IRR', 'IRR (Iranian Rial)'), ('ISK', 'ISK (Iceland Krona)'), ('JMD', 'JMD (Jamaican Dollar)'), ('JOD', 'JOD (Jordanian Dinar)'), ('JPY', 'JPY (Yen)'), ('KES', 'KES (Kenyan Shilling)'), ('KGS', 'KGS (Som)'), ('KHR', 'KHR (Riel)'), ('KMF', 'KMF (Comoro Franc)'), ('KPW', 'KPW (North Korean Won)'), ('KRW', 'KRW (Won)'), ('KWD', 'KWD (Kuwaiti Dinar)'), ('KYD', 'KYD (Cayman Islands Dollar)'), ('KZT', 'KZT (Tenge)'), ('LAK', 'LAK (Kip)'), ('LBP', 'LBP (Lebanese Pound)'), ('LKR', 'LKR (Sri Lanka Rupee)'), ('LRD', 'LRD (Liberian Dollar)'), ('LSL', 'LSL (Loti)'), ('LYD', 'LYD (Libyan Dinar)'), ('MAD', 'MAD (Moroccan Dirham)'), ('MDL', 'MDL (Moldovan Leu)'), ('MGA', 'MGA (Malagasy Ariary)'), ('MKD', 'MKD (Denar)'), ('MMK', 'MMK (Kyat)'), ('MNT', 'MNT (Tugrik)'), ('MOP', 'MOP (Pataca)'), ('MRO', 'MRO (Ouguiya)'), ('MUR', 'MUR (Mauritius Rupee)'), ('MVR', 'MVR (Rufiyaa)'), ('MWK', 'MWK (Malawi Kwacha)'), ('MXN', 'MXN (Mexican Peso)'), ('MYR', 'MYR (Malaysian Ringgit)'), ('MZN', 'MZN (Mozambique Metical)'), ('NAD', 'NAD (Namibia Dollar)'), ('NGN', 'NGN (Naira)'), ('NIO', 'NIO (Cordoba Oro)'), ('NOK', 'NOK (Norwegian Krone)'), ('NPR', 'NPR (Nepalese Rupee)'), ('NZD', 'NZD (New Zealand Dollar)'), ('OMR', 'OMR (Rial Omani)'), ('PAB', 'PAB (Balboa)'), ('PEN', 'PEN (Sol)'), ('PGK', 'PGK (Kina)'), ('PHP', 'PHP (Philippine Peso)'), ('PKR', 'PKR (Pakistan Rupee)'), ('PLN', 'PLN (Zloty)'), ('PYG', 'PYG (Guarani)'), ('QAR', 'QAR (Qatari Rial)'), ('RON', 'RON (Romanian Leu)'), ('RSD', 'RSD (Serbian Dinar)'), ('RUB', 'RUB (Russian Ruble)'), ('RWF', 'RWF (Rwanda Franc)'), ('SAR', 'SAR (Saudi Riyal)'), ('SBD', 'SBD (Solomon Islands Dollar)'), ('SCR', 'SCR (Seychelles Rupee)'), ('SDG', 'SDG (Sudanese Pound)'), ('SEK', 'SEK (Swedish Krona)'), ('SGD', 'SGD (Singapore Dollar)'), ('SHP', 'SHP (Saint Helena Pound)'), ('SLL', 'SLL (Leone)'), ('SOS', 'SOS (Somali Shilling)'), ('SRD', 'SRD (Surinam Dollar)'), ('SSP', 'SSP (South Sudanese Pound)'), ('STD', 'STD (Dobra)'), ('SVC', 'SVC (El 
Salvador Colon)'), ('SYP', 'SYP (Syrian Pound)'), ('SZL', 'SZL (Lilangeni)'), ('THB', 'THB (Baht)'), ('TJS', 'TJS (Somoni)'), ('TMT', 'TMT (Turkmenistan New Manat)'), ('TND', 'TND (Tunisian Dinar)'), ('TOP', 'TOP (Pa’anga)'), ('TRY', 'TRY (Turkish Lira)'), ('TTD', 'TTD (Trinidad and Tobago Dollar)'), ('TWD', 'TWD (New Taiwan Dollar)'), ('TZS', 'TZS (Tanzanian Shilling)'), ('UAH', 'UAH (Hryvnia)'), ('UGX', 'UGX (Uganda Shilling)'), ('USD', 'USD (US Dollar)'), ('UYU', 'UYU (Peso Uruguayo)'), ('UZS', 'UZS (Uzbekistan Sum)'), ('VEF', 'VEF (Bolívar)'), ('VND', 'VND (Dong)'), ('VUV', 'VUV (Vatu)'), ('WST', 'WST (Tala)'), ('XAF', 'XAF (CFA Franc BEAC)'), ('XAG', 'XAG (Silver)'), ('XAU', 'XAU (Gold)'), ('XBA', 'XBA (Bond Markets Unit European Composite Unit (EURCO))'), ('XBB', 'XBB (Bond Markets Unit European Monetary Unit (E.M.U.-6))'), ('XBC', 'XBC (Bond Markets Unit European Unit of Account 9 (E.U.A.-9))'), ('XBD', 'XBD (Bond Markets Unit European Unit of Account 17 (E.U.A.-17))'), ('XCD', 'XCD (East Caribbean Dollar)'), ('XDR', 'XDR (SDR (Special Drawing Right))'), ('XOF', 'XOF (CFA Franc BCEAO)'), ('XPD', 'XPD (Palladium)'), ('XPF', 'XPF (CFP Franc)'), ('XPT', 'XPT (Platinum)'), ('XSU', 'XSU (Sucre)'), ('XTS', 'XTS (Codes specifically reserved for testing purposes)'), ('XUA', 'XUA (ADB Unit of Account)'), ('XXX', 'XXX (The codes assigned for transactions where no currency is involved)'), ('YER', 'YER (Yemeni Rial)'), ('ZAR', 'ZAR (Rand)'), ('ZMW', 'ZMW (Zambian Kwacha)'), ('ZWL', 'ZWL (Zimbabwe Dollar)')], help_text='Used to enforce a certain currency when making transactionsfor the customer.', max_length=4, null=True)),
],
options={
'ordering': ['first_name', 'last_name', 'company'],
},
),
migrations.CreateModel(
name='DocumentEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=1024)),
('unit', models.CharField(blank=True, max_length=1024, null=True)),
('quantity', models.DecimalField(decimal_places=4, max_digits=19, validators=[django.core.validators.MinValueValidator(0.0)])),
('unit_price', models.DecimalField(decimal_places=4, max_digits=19)),
('start_date', models.DateField(blank=True, null=True)),
('end_date', models.DateField(blank=True, null=True)),
('prorated', models.BooleanField(default=False)),
('invoice', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invoice_entries', to='silver.BillingDocumentBase')),
],
options={
'verbose_name': 'Entry',
'verbose_name_plural': 'Entries',
},
),
migrations.CreateModel(
name='MeteredFeature',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, help_text='The feature display name.', max_length=200)),
('unit', models.CharField(max_length=20)),
('price_per_unit', models.DecimalField(decimal_places=4, help_text='The price per unit.', max_digits=19, validators=[django.core.validators.MinValueValidator(0.0)])),
('included_units', models.DecimalField(decimal_places=4, help_text='The number of included units per plan interval.', max_digits=19, validators=[django.core.validators.MinValueValidator(0.0)])),
('included_units_during_trial', models.DecimalField(blank=True, decimal_places=4, help_text='The number of included units during the trial period.', max_digits=19, null=True, validators=[django.core.validators.MinValueValidator(0.0)])),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='MeteredFeatureUnitsLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('consumed_units', models.DecimalField(decimal_places=4, max_digits=19, validators=[django.core.validators.MinValueValidator(0.0)])),
('start_date', models.DateField(editable=False)),
('end_date', models.DateField(editable=False)),
('metered_feature', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='consumed', to='silver.MeteredFeature')),
],
),
migrations.CreateModel(
name='PaymentMethod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('payment_processor', models.CharField(choices=[('manual', 'manual')], max_length=256)),
('added_at', models.DateTimeField(default=django.utils.timezone.now)),
('data', annoying.fields.JSONField(blank=True, default={}, deserializer=json.loads, null=True, serializer=annoying.fields.dumps)),
('verified', models.BooleanField(default=False)),
('canceled', models.BooleanField(default=False)),
('valid_until', models.DateTimeField(blank=True, null=True)),
('display_info', models.CharField(blank=True, max_length=256, null=True)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='silver.Customer')),
],
options={
'ordering': ['-id'],
},
),
migrations.CreateModel(
name='PDF',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, unique=True)),
('pdf_file', models.FileField(blank=True, editable=False, null=True, upload_to=silver.models.documents.pdf.get_upload_path)),
('dirty', models.PositiveIntegerField(default=0)),
('upload_path', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Plan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, help_text='Display name of the plan.', max_length=200)),
('interval', models.CharField(choices=[('day', 'Day'), ('week', 'Week'), ('month', 'Month'), ('year', 'Year')], default='month', help_text='The frequency with which a subscription should be billed.', max_length=12)),
('interval_count', models.PositiveIntegerField(help_text='The number of intervals between each subscription billing')),
('amount', models.DecimalField(decimal_places=4, help_text='The amount in the specified currency to be charged on the interval specified.', max_digits=19, validators=[django.core.validators.MinValueValidator(0.0)])),
('currency', models.CharField(choices=[('AED', 'AED (UAE Dirham)'), ('AFN', 'AFN (Afghani)'), ('ALL', 'ALL (Lek)'), ('AMD', 'AMD (Armenian Dram)'), ('ANG', 'ANG (Netherlands Antillean Guilder)'), ('AOA', 'AOA (Kwanza)'), ('ARS', 'ARS (Argentine Peso)'), ('AUD', 'AUD (Australian Dollar)'), ('AWG', 'AWG (Aruban Florin)'), ('AZN', 'AZN (Azerbaijanian Manat)'), ('BAM', 'BAM (Convertible Mark)'), ('BBD', 'BBD (Barbados Dollar)'), ('BDT', 'BDT (Taka)'), ('BGN', 'BGN (Bulgarian Lev)'), ('BHD', 'BHD (Bahraini Dinar)'), ('BIF', 'BIF (Burundi Franc)'), ('BMD', 'BMD (Bermudian Dollar)'), ('BND', 'BND (Brunei Dollar)'), ('BOB', 'BOB (Boliviano)'), ('BRL', 'BRL (Brazilian Real)'), ('BSD', 'BSD (Bahamian Dollar)'), ('BTN', 'BTN (Ngultrum)'), ('BWP', 'BWP (Pula)'), ('BYN', 'BYN (Belarusian Ruble)'), ('BZD', 'BZD (Belize Dollar)'), ('CAD', 'CAD (Canadian Dollar)'), ('CDF', 'CDF (Congolese Franc)'), ('CHF', 'CHF (Swiss Franc)'), ('CLP', 'CLP (Chilean Peso)'), ('CNY', 'CNY (Yuan Renminbi)'), ('COP', 'COP (Colombian Peso)'), ('CRC', 'CRC (Costa Rican Colon)'), ('CUC', 'CUC (Peso Convertible)'), ('CUP', 'CUP (Cuban Peso)'), ('CVE', 'CVE (Cabo Verde Escudo)'), ('CZK', 'CZK (Czech Koruna)'), ('DJF', 'DJF (Djibouti Franc)'), ('DKK', 'DKK (Danish Krone)'), ('DOP', 'DOP (Dominican Peso)'), ('DZD', 'DZD (Algerian Dinar)'), ('EGP', 'EGP (Egyptian Pound)'), ('ERN', 'ERN (Nakfa)'), ('ETB', 'ETB (Ethiopian Birr)'), ('EUR', 'EUR (Euro)'), ('FJD', 'FJD (Fiji Dollar)'), ('FKP', 'FKP (Falkland Islands Pound)'), ('GBP', 'GBP (Pound Sterling)'), ('GEL', 'GEL (Lari)'), ('GHS', 'GHS (Ghana Cedi)'), ('GIP', 'GIP (Gibraltar Pound)'), ('GMD', 'GMD (Dalasi)'), ('GNF', 'GNF (Guinea Franc)'), ('GTQ', 'GTQ (Quetzal)'), ('GYD', 'GYD (Guyana Dollar)'), ('HKD', 'HKD (Hong Kong Dollar)'), ('HNL', 'HNL (Lempira)'), ('HRK', 'HRK (Kuna)'), ('HTG', 'HTG (Gourde)'), ('HUF', 'HUF (Forint)'), ('IDR', 'IDR (Rupiah)'), ('ILS', 'ILS (New Israeli Sheqel)'), ('INR', 'INR (Indian Rupee)'), ('IQD', 'IQD (Iraqi Dinar)'), 
('IRR', 'IRR (Iranian Rial)'), ('ISK', 'ISK (Iceland Krona)'), ('JMD', 'JMD (Jamaican Dollar)'), ('JOD', 'JOD (Jordanian Dinar)'), ('JPY', 'JPY (Yen)'), ('KES', 'KES (Kenyan Shilling)'), ('KGS', 'KGS (Som)'), ('KHR', 'KHR (Riel)'), ('KMF', 'KMF (Comoro Franc)'), ('KPW', 'KPW (North Korean Won)'), ('KRW', 'KRW (Won)'), ('KWD', 'KWD (Kuwaiti Dinar)'), ('KYD', 'KYD (Cayman Islands Dollar)'), ('KZT', 'KZT (Tenge)'), ('LAK', 'LAK (Kip)'), ('LBP', 'LBP (Lebanese Pound)'), ('LKR', 'LKR (Sri Lanka Rupee)'), ('LRD', 'LRD (Liberian Dollar)'), ('LSL', 'LSL (Loti)'), ('LYD', 'LYD (Libyan Dinar)'), ('MAD', 'MAD (Moroccan Dirham)'), ('MDL', 'MDL (Moldovan Leu)'), ('MGA', 'MGA (Malagasy Ariary)'), ('MKD', 'MKD (Denar)'), ('MMK', 'MMK (Kyat)'), ('MNT', 'MNT (Tugrik)'), ('MOP', 'MOP (Pataca)'), ('MRO', 'MRO (Ouguiya)'), ('MUR', 'MUR (Mauritius Rupee)'), ('MVR', 'MVR (Rufiyaa)'), ('MWK', 'MWK (Malawi Kwacha)'), ('MXN', 'MXN (Mexican Peso)'), ('MYR', 'MYR (Malaysian Ringgit)'), ('MZN', 'MZN (Mozambique Metical)'), ('NAD', 'NAD (Namibia Dollar)'), ('NGN', 'NGN (Naira)'), ('NIO', 'NIO (Cordoba Oro)'), ('NOK', 'NOK (Norwegian Krone)'), ('NPR', 'NPR (Nepalese Rupee)'), ('NZD', 'NZD (New Zealand Dollar)'), ('OMR', 'OMR (Rial Omani)'), ('PAB', 'PAB (Balboa)'), ('PEN', 'PEN (Sol)'), ('PGK', 'PGK (Kina)'), ('PHP', 'PHP (Philippine Peso)'), ('PKR', 'PKR (Pakistan Rupee)'), ('PLN', 'PLN (Zloty)'), ('PYG', 'PYG (Guarani)'), ('QAR', 'QAR (Qatari Rial)'), ('RON', 'RON (Romanian Leu)'), ('RSD', 'RSD (Serbian Dinar)'), ('RUB', 'RUB (Russian Ruble)'), ('RWF', 'RWF (Rwanda Franc)'), ('SAR', 'SAR (Saudi Riyal)'), ('SBD', 'SBD (Solomon Islands Dollar)'), ('SCR', 'SCR (Seychelles Rupee)'), ('SDG', 'SDG (Sudanese Pound)'), ('SEK', 'SEK (Swedish Krona)'), ('SGD', 'SGD (Singapore Dollar)'), ('SHP', 'SHP (Saint Helena Pound)'), ('SLL', 'SLL (Leone)'), ('SOS', 'SOS (Somali Shilling)'), ('SRD', 'SRD (Surinam Dollar)'), ('SSP', 'SSP (South Sudanese Pound)'), ('STD', 'STD (Dobra)'), ('SVC', 'SVC (El Salvador 
Colon)'), ('SYP', 'SYP (Syrian Pound)'), ('SZL', 'SZL (Lilangeni)'), ('THB', 'THB (Baht)'), ('TJS', 'TJS (Somoni)'), ('TMT', 'TMT (Turkmenistan New Manat)'), ('TND', 'TND (Tunisian Dinar)'), ('TOP', 'TOP (Pa’anga)'), ('TRY', 'TRY (Turkish Lira)'), ('TTD', 'TTD (Trinidad and Tobago Dollar)'), ('TWD', 'TWD (New Taiwan Dollar)'), ('TZS', 'TZS (Tanzanian Shilling)'), ('UAH', 'UAH (Hryvnia)'), ('UGX', 'UGX (Uganda Shilling)'), ('USD', 'USD (US Dollar)'), ('UYU', 'UYU (Peso Uruguayo)'), ('UZS', 'UZS (Uzbekistan Sum)'), ('VEF', 'VEF (Bolívar)'), ('VND', 'VND (Dong)'), ('VUV', 'VUV (Vatu)'), ('WST', 'WST (Tala)'), ('XAF', 'XAF (CFA Franc BEAC)'), ('XAG', 'XAG (Silver)'), ('XAU', 'XAU (Gold)'), ('XBA', 'XBA (Bond Markets Unit European Composite Unit (EURCO))'), ('XBB', 'XBB (Bond Markets Unit European Monetary Unit (E.M.U.-6))'), ('XBC', 'XBC (Bond Markets Unit European Unit of Account 9 (E.U.A.-9))'), ('XBD', 'XBD (Bond Markets Unit European Unit of Account 17 (E.U.A.-17))'), ('XCD', 'XCD (East Caribbean Dollar)'), ('XDR', 'XDR (SDR (Special Drawing Right))'), ('XOF', 'XOF (CFA Franc BCEAO)'), ('XPD', 'XPD (Palladium)'), ('XPF', 'XPF (CFP Franc)'), ('XPT', 'XPT (Platinum)'), ('XSU', 'XSU (Sucre)'), ('XTS', 'XTS (Codes specifically reserved for testing purposes)'), ('XUA', 'XUA (ADB Unit of Account)'), ('XXX', 'XXX (The codes assigned for transactions where no currency is involved)'), ('YER', 'YER (Yemeni Rial)'), ('ZAR', 'ZAR (Rand)'), ('ZMW', 'ZMW (Zambian Kwacha)'), ('ZWL', 'ZWL (Zimbabwe Dollar)')], default='USD', help_text='The currency in which the subscription will be charged.', max_length=4)),
('trial_period_days', models.PositiveIntegerField(blank=True, help_text='Number of trial period days granted when subscribing a customer to this plan.', null=True, verbose_name='Trial days')),
('generate_documents_on_trial_end', models.NullBooleanField(help_text='If this is set to True, then billing documents will be generated when the subscription trial ends, instead of waiting for the end of the billing cycle.')),
('separate_cycles_during_trial', models.NullBooleanField(help_text='If this is set to True, then the trial period cycle will be split if it spans across multiple billing intervals.')),
('prebill_plan', models.NullBooleanField(help_text='If this is set to True, then the plan base amount will be billed at thebeginning of the billing cycle rather than after the end.')),
('generate_after', models.PositiveIntegerField(default=0, help_text='Number of seconds to wait after current billing cycle ends before generating the invoice. This can be used to allow systems to finish updating feature counters.')),
('cycle_billing_duration', models.DurationField(blank=True, help_text="This can be used to ensure that the billing date doesn't pass a certain date.\nFor example if this field is set to 2 days, for a monthly subscription, the billing date will never surpass the 2nd day of the month. Billing documents can still be generated after that day during the billing cycle, but their billing date will appear to be the end of the cycle billing duration.", null=True)),
('enabled', models.BooleanField(default=True, help_text='Whether to accept subscriptions.')),
('private', models.BooleanField(default=False, help_text='Indicates if a plan is private.')),
('metered_features', models.ManyToManyField(blank=True, help_text="A list of the plan's metered features.", to='silver.MeteredFeature')),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='ProductCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(max_length=128, unique=True)),
],
),
migrations.CreateModel(
name='Provider',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('live', livefield.fields.LiveField(default=True)),
('company', models.CharField(blank=True, max_length=128, null=True)),
('address_1', models.CharField(max_length=128)),
('address_2', models.CharField(blank=True, max_length=128, null=True)),
('country', models.CharField(choices=[('AD', 'Andorra'), ('AE', 'United Arab Emirates'), ('AF', 'Afghanistan'), ('AG', 'Antigua and Barbuda'), ('AI', 'Anguilla'), ('AL', 'Albania'), ('AM', 'Armenia'), ('AO', 'Angola'), ('AQ', 'Antarctica'), ('AR', 'Argentina'), ('AS', 'American Samoa'), ('AT', 'Austria'), ('AU', 'Australia'), ('AW', 'Aruba'), ('AX', 'Åland Islands'), ('AZ', 'Azerbaijan'), ('BA', 'Bosnia and Herzegovina'), ('BB', 'Barbados'), ('BD', 'Bangladesh'), ('BE', 'Belgium'), ('BF', 'Burkina Faso'), ('BG', 'Bulgaria'), ('BH', 'Bahrain'), ('BI', 'Burundi'), ('BJ', 'Benin'), ('BL', 'Saint Barthélemy'), ('BM', 'Bermuda'), ('BN', 'Brunei Darussalam'), ('BO', 'Bolivia, Plurinational State of'), ('BQ', 'Bonaire, Sint Eustatius and Saba'), ('BR', 'Brazil'), ('BS', 'Bahamas'), ('BT', 'Bhutan'), ('BV', 'Bouvet Island'), ('BW', 'Botswana'), ('BY', 'Belarus'), ('BZ', 'Belize'), ('CA', 'Canada'), ('CC', 'Cocos (Keeling) Islands'), ('CD', 'Congo, The Democratic Republic of the'), ('CF', 'Central African Republic'), ('CG', 'Congo'), ('CH', 'Switzerland'), ('CI', "Côte d'Ivoire"), ('CK', 'Cook Islands'), ('CL', 'Chile'), ('CM', 'Cameroon'), ('CN', 'China'), ('CO', 'Colombia'), ('CR', 'Costa Rica'), ('CU', 'Cuba'), ('CV', 'Cabo Verde'), ('CW', 'Curaçao'), ('CX', 'Christmas Island'), ('CY', 'Cyprus'), ('CZ', 'Czechia'), ('DE', 'Germany'), ('DJ', 'Djibouti'), ('DK', 'Denmark'), ('DM', 'Dominica'), ('DO', 'Dominican Republic'), ('DZ', 'Algeria'), ('EC', 'Ecuador'), ('EE', 'Estonia'), ('EG', 'Egypt'), ('EH', 'Western Sahara'), ('ER', 'Eritrea'), ('ES', 'Spain'), ('ET', 'Ethiopia'), ('FI', 'Finland'), ('FJ', 'Fiji'), ('FK', 'Falkland Islands (Malvinas)'), ('FM', 'Micronesia, Federated States of'), ('FO', 'Faroe Islands'), ('FR', 'France'), ('GA', 'Gabon'), ('GB', 'United Kingdom'), ('GD', 'Grenada'), ('GE', 'Georgia'), ('GF', 'French Guiana'), ('GG', 'Guernsey'), ('GH', 'Ghana'), ('GI', 'Gibraltar'), ('GL', 'Greenland'), ('GM', 'Gambia'), ('GN', 'Guinea'), ('GP', 'Guadeloupe'), 
('GQ', 'Equatorial Guinea'), ('GR', 'Greece'), ('GS', 'South Georgia and the South Sandwich Islands'), ('GT', 'Guatemala'), ('GU', 'Guam'), ('GW', 'Guinea-Bissau'), ('GY', 'Guyana'), ('HK', 'Hong Kong'), ('HM', 'Heard Island and McDonald Islands'), ('HN', 'Honduras'), ('HR', 'Croatia'), ('HT', 'Haiti'), ('HU', 'Hungary'), ('ID', 'Indonesia'), ('IE', 'Ireland'), ('IL', 'Israel'), ('IM', 'Isle of Man'), ('IN', 'India'), ('IO', 'British Indian Ocean Territory'), ('IQ', 'Iraq'), ('IR', 'Iran, Islamic Republic of'), ('IS', 'Iceland'), ('IT', 'Italy'), ('JE', 'Jersey'), ('JM', 'Jamaica'), ('JO', 'Jordan'), ('JP', 'Japan'), ('KE', 'Kenya'), ('KG', 'Kyrgyzstan'), ('KH', 'Cambodia'), ('KI', 'Kiribati'), ('KM', 'Comoros'), ('KN', 'Saint Kitts and Nevis'), ('KP', "Korea, Democratic People's Republic of"), ('KR', 'Korea, Republic of'), ('KW', 'Kuwait'), ('KY', 'Cayman Islands'), ('KZ', 'Kazakhstan'), ('LA', "Lao People's Democratic Republic"), ('LB', 'Lebanon'), ('LC', 'Saint Lucia'), ('LI', 'Liechtenstein'), ('LK', 'Sri Lanka'), ('LR', 'Liberia'), ('LS', 'Lesotho'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('LV', 'Latvia'), ('LY', 'Libya'), ('MA', 'Morocco'), ('MC', 'Monaco'), ('MD', 'Moldova, Republic of'), ('ME', 'Montenegro'), ('MF', 'Saint Martin (French part)'), ('MG', 'Madagascar'), ('MH', 'Marshall Islands'), ('MK', 'Macedonia, Republic of'), ('ML', 'Mali'), ('MM', 'Myanmar'), ('MN', 'Mongolia'), ('MO', 'Macao'), ('MP', 'Northern Mariana Islands'), ('MQ', 'Martinique'), ('MR', 'Mauritania'), ('MS', 'Montserrat'), ('MT', 'Malta'), ('MU', 'Mauritius'), ('MV', 'Maldives'), ('MW', 'Malawi'), ('MX', 'Mexico'), ('MY', 'Malaysia'), ('MZ', 'Mozambique'), ('NA', 'Namibia'), ('NC', 'New Caledonia'), ('NE', 'Niger'), ('NF', 'Norfolk Island'), ('NG', 'Nigeria'), ('NI', 'Nicaragua'), ('NL', 'Netherlands'), ('NO', 'Norway'), ('NP', 'Nepal'), ('NR', 'Nauru'), ('NU', 'Niue'), ('NZ', 'New Zealand'), ('OM', 'Oman'), ('PA', 'Panama'), ('PE', 'Peru'), ('PF', 'French Polynesia'), ('PG', 
'Papua New Guinea'), ('PH', 'Philippines'), ('PK', 'Pakistan'), ('PL', 'Poland'), ('PM', 'Saint Pierre and Miquelon'), ('PN', 'Pitcairn'), ('PR', 'Puerto Rico'), ('PS', 'Palestine, State of'), ('PT', 'Portugal'), ('PW', 'Palau'), ('PY', 'Paraguay'), ('QA', 'Qatar'), ('RE', 'Réunion'), ('RO', 'Romania'), ('RS', 'Serbia'), ('RU', 'Russian Federation'), ('RW', 'Rwanda'), ('SA', 'Saudi Arabia'), ('SB', 'Solomon Islands'), ('SC', 'Seychelles'), ('SD', 'Sudan'), ('SE', 'Sweden'), ('SG', 'Singapore'), ('SH', 'Saint Helena, Ascension and Tristan da Cunha'), ('SI', 'Slovenia'), ('SJ', 'Svalbard and Jan Mayen'), ('SK', 'Slovakia'), ('SL', 'Sierra Leone'), ('SM', 'San Marino'), ('SN', 'Senegal'), ('SO', 'Somalia'), ('SR', 'Suriname'), ('SS', 'South Sudan'), ('ST', 'Sao Tome and Principe'), ('SV', 'El Salvador'), ('SX', 'Sint Maarten (Dutch part)'), ('SY', 'Syrian Arab Republic'), ('SZ', 'Swaziland'), ('TC', 'Turks and Caicos Islands'), ('TD', 'Chad'), ('TF', 'French Southern Territories'), ('TG', 'Togo'), ('TH', 'Thailand'), ('TJ', 'Tajikistan'), ('TK', 'Tokelau'), ('TL', 'Timor-Leste'), ('TM', 'Turkmenistan'), ('TN', 'Tunisia'), ('TO', 'Tonga'), ('TR', 'Turkey'), ('TT', 'Trinidad and Tobago'), ('TV', 'Tuvalu'), ('TW', 'Taiwan, Province of China'), ('TZ', 'Tanzania, United Republic of'), ('UA', 'Ukraine'), ('UG', 'Uganda'), ('UM', 'United States Minor Outlying Islands'), ('US', 'United States'), ('UY', 'Uruguay'), ('UZ', 'Uzbekistan'), ('VA', 'Holy See (Vatican City State)'), ('VC', 'Saint Vincent and the Grenadines'), ('VE', 'Venezuela, Bolivarian Republic of'), ('VG', 'Virgin Islands, British'), ('VI', 'Virgin Islands, U.S.'), ('VN', 'Viet Nam'), ('VU', 'Vanuatu'), ('WF', 'Wallis and Futuna'), ('WS', 'Samoa'), ('YE', 'Yemen'), ('YT', 'Mayotte'), ('ZA', 'South Africa'), ('ZM', 'Zambia'), ('ZW', 'Zimbabwe')], max_length=3)),
('phone', models.CharField(blank=True, max_length=32, null=True)),
('email', models.CharField(blank=True, max_length=254, null=True)),
('city', models.CharField(max_length=128)),
('state', models.CharField(blank=True, max_length=128, null=True)),
('zip_code', models.CharField(blank=True, max_length=32, null=True)),
('extra', models.TextField(blank=True, help_text='Extra information to display on the invoice (markdown formatted).', null=True)),
('meta', annoying.fields.JSONField(blank=True, default={}, deserializer=json.loads, null=True, serializer=annoying.fields.dumps)),
('name', models.CharField(help_text='The name to be used for billing purposes.', max_length=128)),
('flow', models.CharField(choices=[('proforma', 'Proforma'), ('invoice', 'Invoice')], default='proforma', help_text='One of the available workflows for generating proformas and invoices (see the documentation for more details).', max_length=10)),
('invoice_series', models.CharField(help_text='The series that will be used on every invoice generated by this provider.', max_length=20)),
('invoice_starting_number', models.PositiveIntegerField()),
('proforma_series', models.CharField(blank=True, help_text='The series that will be used on every proforma generated by this provider.', max_length=20, null=True)),
('proforma_starting_number', models.PositiveIntegerField(blank=True, null=True)),
('default_document_state', models.CharField(choices=[('draft', 'Draft'), ('issued', 'Issued')], default='draft', help_text='The default state of the auto-generated documents.', max_length=10)),
('generate_documents_on_trial_end', models.BooleanField(default=True, help_text='If this is set to True, then billing documents will be generated when the subscription trial ends, instead of waiting for the end of the billing cycle.')),
('separate_cycles_during_trial', models.BooleanField(default=False, help_text='If this is set to True, then the trial period cycle will be split if it spans across multiple billing intervals.')),
('prebill_plan', models.BooleanField(default=True, help_text='If this is set to True, then the plan base amount will be billed at thebeginning of the billing cycle rather than after the end.')),
('cycle_billing_duration', models.DurationField(blank=True, help_text="This can be used to ensure that the billing date doesn't pass a certain date.\nFor example if this field is set to 2 days, for a monthly subscription, the billing date will never surpass the 2nd day of the month. Billing documents can still be generated after that day during the billing cycle, but their billing date will appear to be the end of the cycle billing duration.", null=True)),
],
options={
'ordering': ['name', 'company'],
},
),
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(blank=True, max_length=1024, null=True)),
('trial_end', models.DateField(blank=True, help_text='The date at which the trial ends. If set, overrides the computed trial end date from the plan.', null=True)),
('start_date', models.DateField(blank=True, help_text='The starting date for the subscription.', null=True)),
('cancel_date', models.DateField(blank=True, help_text='The date when the subscription was canceled.', null=True)),
('ended_at', models.DateField(blank=True, help_text='The date when the subscription ended.', null=True)),
('reference', models.CharField(blank=True, help_text="The subscription's reference in an external system.", max_length=128, null=True, validators=[django.core.validators.RegexValidator(message='Reference must not contain commas.', regex='^[^,]*$')])),
('state', django_fsm.FSMField(choices=[('active', 'Active'), ('inactive', 'Inactive'), ('canceled', 'Canceled'), ('ended', 'Ended')], default='inactive', help_text='The state the subscription is in.', max_length=12, protected=True)),
('meta', annoying.fields.JSONField(blank=True, default={}, deserializer=json.loads, null=True, serializer=annoying.fields.dumps)),
('customer', models.ForeignKey(help_text='The customer who is subscribed to the plan.', on_delete=django.db.models.deletion.CASCADE, related_name='subscriptions', to='silver.Customer')),
('plan', models.ForeignKey(help_text='The plan the customer is subscribed to.', on_delete=django.db.models.deletion.CASCADE, to='silver.Plan')),
],
),
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=2, max_digits=12, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('currency', models.CharField(choices=[('AED', 'AED (UAE Dirham)'), ('AFN', 'AFN (Afghani)'), ('ALL', 'ALL (Lek)'), ('AMD', 'AMD (Armenian Dram)'), ('ANG', 'ANG (Netherlands Antillean Guilder)'), ('AOA', 'AOA (Kwanza)'), ('ARS', 'ARS (Argentine Peso)'), ('AUD', 'AUD (Australian Dollar)'), ('AWG', 'AWG (Aruban Florin)'), ('AZN', 'AZN (Azerbaijanian Manat)'), ('BAM', 'BAM (Convertible Mark)'), ('BBD', 'BBD (Barbados Dollar)'), ('BDT', 'BDT (Taka)'), ('BGN', 'BGN (Bulgarian Lev)'), ('BHD', 'BHD (Bahraini Dinar)'), ('BIF', 'BIF (Burundi Franc)'), ('BMD', 'BMD (Bermudian Dollar)'), ('BND', 'BND (Brunei Dollar)'), ('BOB', 'BOB (Boliviano)'), ('BRL', 'BRL (Brazilian Real)'), ('BSD', 'BSD (Bahamian Dollar)'), ('BTN', 'BTN (Ngultrum)'), ('BWP', 'BWP (Pula)'), ('BYN', 'BYN (Belarusian Ruble)'), ('BZD', 'BZD (Belize Dollar)'), ('CAD', 'CAD (Canadian Dollar)'), ('CDF', 'CDF (Congolese Franc)'), ('CHF', 'CHF (Swiss Franc)'), ('CLP', 'CLP (Chilean Peso)'), ('CNY', 'CNY (Yuan Renminbi)'), ('COP', 'COP (Colombian Peso)'), ('CRC', 'CRC (Costa Rican Colon)'), ('CUC', 'CUC (Peso Convertible)'), ('CUP', 'CUP (Cuban Peso)'), ('CVE', 'CVE (Cabo Verde Escudo)'), ('CZK', 'CZK (Czech Koruna)'), ('DJF', 'DJF (Djibouti Franc)'), ('DKK', 'DKK (Danish Krone)'), ('DOP', 'DOP (Dominican Peso)'), ('DZD', 'DZD (Algerian Dinar)'), ('EGP', 'EGP (Egyptian Pound)'), ('ERN', 'ERN (Nakfa)'), ('ETB', 'ETB (Ethiopian Birr)'), ('EUR', 'EUR (Euro)'), ('FJD', 'FJD (Fiji Dollar)'), ('FKP', 'FKP (Falkland Islands Pound)'), ('GBP', 'GBP (Pound Sterling)'), ('GEL', 'GEL (Lari)'), ('GHS', 'GHS (Ghana Cedi)'), ('GIP', 'GIP (Gibraltar Pound)'), ('GMD', 'GMD (Dalasi)'), ('GNF', 'GNF (Guinea Franc)'), ('GTQ', 'GTQ (Quetzal)'), ('GYD', 'GYD (Guyana Dollar)'), ('HKD', 'HKD (Hong Kong Dollar)'), ('HNL', 'HNL (Lempira)'), ('HRK', 'HRK (Kuna)'), ('HTG', 'HTG (Gourde)'), ('HUF', 'HUF (Forint)'), ('IDR', 'IDR (Rupiah)'), ('ILS', 'ILS (New Israeli Sheqel)'), ('INR', 'INR (Indian Rupee)'), ('IQD', 'IQD (Iraqi Dinar)'), 
('IRR', 'IRR (Iranian Rial)'), ('ISK', 'ISK (Iceland Krona)'), ('JMD', 'JMD (Jamaican Dollar)'), ('JOD', 'JOD (Jordanian Dinar)'), ('JPY', 'JPY (Yen)'), ('KES', 'KES (Kenyan Shilling)'), ('KGS', 'KGS (Som)'), ('KHR', 'KHR (Riel)'), ('KMF', 'KMF (Comoro Franc)'), ('KPW', 'KPW (North Korean Won)'), ('KRW', 'KRW (Won)'), ('KWD', 'KWD (Kuwaiti Dinar)'), ('KYD', 'KYD (Cayman Islands Dollar)'), ('KZT', 'KZT (Tenge)'), ('LAK', 'LAK (Kip)'), ('LBP', 'LBP (Lebanese Pound)'), ('LKR', 'LKR (Sri Lanka Rupee)'), ('LRD', 'LRD (Liberian Dollar)'), ('LSL', 'LSL (Loti)'), ('LYD', 'LYD (Libyan Dinar)'), ('MAD', 'MAD (Moroccan Dirham)'), ('MDL', 'MDL (Moldovan Leu)'), ('MGA', 'MGA (Malagasy Ariary)'), ('MKD', 'MKD (Denar)'), ('MMK', 'MMK (Kyat)'), ('MNT', 'MNT (Tugrik)'), ('MOP', 'MOP (Pataca)'), ('MRO', 'MRO (Ouguiya)'), ('MUR', 'MUR (Mauritius Rupee)'), ('MVR', 'MVR (Rufiyaa)'), ('MWK', 'MWK (Malawi Kwacha)'), ('MXN', 'MXN (Mexican Peso)'), ('MYR', 'MYR (Malaysian Ringgit)'), ('MZN', 'MZN (Mozambique Metical)'), ('NAD', 'NAD (Namibia Dollar)'), ('NGN', 'NGN (Naira)'), ('NIO', 'NIO (Cordoba Oro)'), ('NOK', 'NOK (Norwegian Krone)'), ('NPR', 'NPR (Nepalese Rupee)'), ('NZD', 'NZD (New Zealand Dollar)'), ('OMR', 'OMR (Rial Omani)'), ('PAB', 'PAB (Balboa)'), ('PEN', 'PEN (Sol)'), ('PGK', 'PGK (Kina)'), ('PHP', 'PHP (Philippine Peso)'), ('PKR', 'PKR (Pakistan Rupee)'), ('PLN', 'PLN (Zloty)'), ('PYG', 'PYG (Guarani)'), ('QAR', 'QAR (Qatari Rial)'), ('RON', 'RON (Romanian Leu)'), ('RSD', 'RSD (Serbian Dinar)'), ('RUB', 'RUB (Russian Ruble)'), ('RWF', 'RWF (Rwanda Franc)'), ('SAR', 'SAR (Saudi Riyal)'), ('SBD', 'SBD (Solomon Islands Dollar)'), ('SCR', 'SCR (Seychelles Rupee)'), ('SDG', 'SDG (Sudanese Pound)'), ('SEK', 'SEK (Swedish Krona)'), ('SGD', 'SGD (Singapore Dollar)'), ('SHP', 'SHP (Saint Helena Pound)'), ('SLL', 'SLL (Leone)'), ('SOS', 'SOS (Somali Shilling)'), ('SRD', 'SRD (Surinam Dollar)'), ('SSP', 'SSP (South Sudanese Pound)'), ('STD', 'STD (Dobra)'), ('SVC', 'SVC (El Salvador 
Colon)'), ('SYP', 'SYP (Syrian Pound)'), ('SZL', 'SZL (Lilangeni)'), ('THB', 'THB (Baht)'), ('TJS', 'TJS (Somoni)'), ('TMT', 'TMT (Turkmenistan New Manat)'), ('TND', 'TND (Tunisian Dinar)'), ('TOP', 'TOP (Pa’anga)'), ('TRY', 'TRY (Turkish Lira)'), ('TTD', 'TTD (Trinidad and Tobago Dollar)'), ('TWD', 'TWD (New Taiwan Dollar)'), ('TZS', 'TZS (Tanzanian Shilling)'), ('UAH', 'UAH (Hryvnia)'), ('UGX', 'UGX (Uganda Shilling)'), ('USD', 'USD (US Dollar)'), ('UYU', 'UYU (Peso Uruguayo)'), ('UZS', 'UZS (Uzbekistan Sum)'), ('VEF', 'VEF (Bolívar)'), ('VND', 'VND (Dong)'), ('VUV', 'VUV (Vatu)'), ('WST', 'WST (Tala)'), ('XAF', 'XAF (CFA Franc BEAC)'), ('XAG', 'XAG (Silver)'), ('XAU', 'XAU (Gold)'), ('XBA', 'XBA (Bond Markets Unit European Composite Unit (EURCO))'), ('XBB', 'XBB (Bond Markets Unit European Monetary Unit (E.M.U.-6))'), ('XBC', 'XBC (Bond Markets Unit European Unit of Account 9 (E.U.A.-9))'), ('XBD', 'XBD (Bond Markets Unit European Unit of Account 17 (E.U.A.-17))'), ('XCD', 'XCD (East Caribbean Dollar)'), ('XDR', 'XDR (SDR (Special Drawing Right))'), ('XOF', 'XOF (CFA Franc BCEAO)'), ('XPD', 'XPD (Palladium)'), ('XPF', 'XPF (CFP Franc)'), ('XPT', 'XPT (Platinum)'), ('XSU', 'XSU (Sucre)'), ('XTS', 'XTS (Codes specifically reserved for testing purposes)'), ('XUA', 'XUA (ADB Unit of Account)'), ('XXX', 'XXX (The codes assigned for transactions where no currency is involved)'), ('YER', 'YER (Yemeni Rial)'), ('ZAR', 'ZAR (Rand)'), ('ZMW', 'ZMW (Zambian Kwacha)'), ('ZWL', 'ZWL (Zimbabwe Dollar)')], help_text='The currency used for billing.', max_length=4)),
('external_reference', models.CharField(blank=True, max_length=256, null=True)),
('data', annoying.fields.JSONField(blank=True, default={}, deserializer=json.loads, null=True, serializer=annoying.fields.dumps)),
('state', django_fsm.FSMField(choices=[('initial', 'Initial'), ('pending', 'Pending'), ('settled', 'Settled'), ('failed', 'Failed'), ('canceled', 'Canceled'), ('refunded', 'Refunded')], default='initial', max_length=8)),
('uuid', models.UUIDField(default=uuid.uuid4)),
('valid_until', models.DateTimeField(blank=True, null=True)),
('last_access', models.DateTimeField(blank=True, null=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('updated_at', silver.utils.models.AutoDateTimeField(default=django.utils.timezone.now)),
('fail_code', models.CharField(blank=True, choices=[('default', 'default'), ('insufficient_funds', 'insufficient_funds'), ('expired_payment_method', 'expired_payment_method'), ('expired_card', 'expired_card'), ('invalid_payment_method', 'invalid_payment_method'), ('invalid_card', 'invalid_card'), ('limit_exceeded', 'limit_exceeded'), ('transaction_declined', 'transaction_declined'), ('transaction_declined_by_bank', 'transaction_declined_by_bank'), ('transaction_hard_declined', 'transaction_hard_declined'), ('transaction_hard_declined_by_bank', 'transaction_hard_declined_by_bank')], max_length=32, null=True)),
('refund_code', models.CharField(blank=True, choices=[('default', 'default')], max_length=32, null=True)),
('cancel_code', models.CharField(blank=True, choices=[('default', 'default')], max_length=32, null=True)),
('invoice', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invoice_transactions', to='silver.BillingDocumentBase')),
('payment_method', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='silver.PaymentMethod')),
('proforma', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='proforma_transactions', to='silver.BillingDocumentBase')),
],
options={
'ordering': ['-id'],
},
),
migrations.AlterIndexTogether(
name='provider',
index_together=set([('name', 'company')]),
),
migrations.AddField(
model_name='plan',
name='product_code',
field=models.ForeignKey(help_text='The product code for this plan.', on_delete=django.db.models.deletion.CASCADE, to='silver.ProductCode'),
),
migrations.AddField(
model_name='plan',
name='provider',
field=models.ForeignKey(help_text='The provider which provides the plan.', on_delete=django.db.models.deletion.CASCADE, related_name='plans', to='silver.Provider'),
),
migrations.AddField(
model_name='meteredfeatureunitslog',
name='subscription',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mf_log_entries', to='silver.Subscription'),
),
migrations.AddField(
model_name='meteredfeature',
name='product_code',
field=silver.utils.models.UnsavedForeignKey(help_text='The product code for this plan.', on_delete=django.db.models.deletion.CASCADE, to='silver.ProductCode'),
),
migrations.AddField(
model_name='documententry',
name='product_code',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invoices', to='silver.ProductCode'),
),
migrations.AddField(
model_name='documententry',
name='proforma',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='proforma_entries', to='silver.BillingDocumentBase'),
),
migrations.AlterIndexTogether(
name='customer',
index_together=set([('first_name', 'last_name', 'company')]),
),
migrations.AddField(
model_name='billinglog',
name='subscription',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='billing_logs', to='silver.Subscription'),
),
migrations.AddField(
model_name='billingdocumentbase',
name='customer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='silver.Customer'),
),
migrations.AddField(
model_name='billingdocumentbase',
name='pdf',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='silver.PDF'),
),
migrations.AddField(
model_name='billingdocumentbase',
name='provider',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='silver.Provider'),
),
migrations.AddField(
model_name='billingdocumentbase',
name='related_document',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='reverse_related_document', to='silver.BillingDocumentBase'),
),
migrations.CreateModel(
name='Invoice',
fields=[
],
options={
'proxy': True,
'indexes': [],
},
bases=('silver.billingdocumentbase',),
),
migrations.CreateModel(
name='Proforma',
fields=[
],
options={
'proxy': True,
'indexes': [],
},
bases=('silver.billingdocumentbase',),
),
migrations.AlterUniqueTogether(
name='meteredfeatureunitslog',
unique_together=set([('metered_feature', 'subscription', 'start_date', 'end_date')]),
),
migrations.AlterUniqueTogether(
name='billingdocumentbase',
unique_together=set([('kind', 'provider', 'series', 'number')]),
),
]
| 182.207084
| 5,858
| 0.590459
| 7,978
| 66,870
| 4.893081
| 0.161319
| 0.017291
| 0.009017
| 0.0166
| 0.860645
| 0.834567
| 0.808771
| 0.79399
| 0.777442
| 0.761739
| 0
| 0.004627
| 0.162883
| 66,870
| 366
| 5,859
| 182.704918
| 0.692736
| 0.001032
| 0
| 0.48324
| 1
| 0.030726
| 0.500734
| 0.015405
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.00838
| 0.039106
| 0
| 0.050279
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
41fe8cbf83da61053b9d68cdeb3dd6d38bcc9877
| 16,872
|
py
|
Python
|
tb_api_client/swagger_client/apis/tenant_controller_api.py
|
MOSAIC-LoPoW/oss7-thingsboard-backend-example
|
9b289dd7fdbb6e932ca338ad497a7bb1fc84d010
|
[
"Apache-2.0"
] | 5
|
2017-11-27T15:48:16.000Z
|
2020-09-21T04:18:47.000Z
|
tb_api_client/swagger_client/apis/tenant_controller_api.py
|
MOSAIC-LoPoW/oss7-thingsboard-backend-example
|
9b289dd7fdbb6e932ca338ad497a7bb1fc84d010
|
[
"Apache-2.0"
] | null | null | null |
tb_api_client/swagger_client/apis/tenant_controller_api.py
|
MOSAIC-LoPoW/oss7-thingsboard-backend-example
|
9b289dd7fdbb6e932ca338ad497a7bb1fc84d010
|
[
"Apache-2.0"
] | 6
|
2018-01-14T17:23:46.000Z
|
2019-06-24T13:38:54.000Z
|
# coding: utf-8
"""
Thingsboard REST API
For instructions how to authorize requests please visit <a href='http://thingsboard.io/docs/reference/rest-api/'>REST API documentation page</a>.
OpenAPI spec version: 2.0
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class TenantControllerApi(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen

    NOTE(review): `async` became a reserved keyword in Python 3.7, so it can
    no longer be written as a literal keyword argument. Callers must pass it
    via dict unpacking (``**{'async': True}``); internally it is forwarded to
    ``call_api`` the same way. Behavior is otherwise unchanged.
    """

    def __init__(self, api_client=None):
        # Fall back to a default ApiClient so the API class is usable
        # without explicit configuration.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def delete_tenant_using_delete(self, tenant_id, **kwargs):
        """
        deleteTenant
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True via dict unpacking
        (``async`` is a Python 3.7+ keyword and cannot be typed literally):
        >>> thread = api.delete_tenant_using_delete(tenant_id, **{'async': True})
        >>> result = thread.get()

        :param async bool
        :param str tenant_id: tenantId (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return self.delete_tenant_using_delete_with_http_info(tenant_id, **kwargs)
        else:
            data = self.delete_tenant_using_delete_with_http_info(tenant_id, **kwargs)
            return data

    def delete_tenant_using_delete_with_http_info(self, tenant_id, **kwargs):
        """
        deleteTenant
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True via dict unpacking:
        >>> thread = api.delete_tenant_using_delete_with_http_info(tenant_id, **{'async': True})
        >>> result = thread.get()

        :param async bool
        :param str tenant_id: tenantId (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['tenant_id']
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject unknown keyword arguments early so typos surface loudly.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_tenant_using_delete" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'tenant_id' is set
        if ('tenant_id' not in params) or (params['tenant_id'] is None):
            raise ValueError("Missing the required parameter `tenant_id` when calling `delete_tenant_using_delete`")

        collection_formats = {}

        path_params = {}
        if 'tenant_id' in params:
            path_params['tenantId'] = params['tenant_id']

        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['*/*'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['X-Authorization']

        return self.api_client.call_api(
            '/api/tenant/{tenantId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,
            auth_settings=auth_settings,
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats,
            # 'async' is a reserved keyword in Python 3.7+, so it cannot be
            # written as a literal keyword argument; pass it via ** unpacking.
            **{'async': params.get('async')})

    def get_tenant_by_id_using_get(self, tenant_id, **kwargs):
        """
        getTenantById
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True via dict unpacking:
        >>> thread = api.get_tenant_by_id_using_get(tenant_id, **{'async': True})
        >>> result = thread.get()

        :param async bool
        :param str tenant_id: tenantId (required)
        :return: Tenant
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return self.get_tenant_by_id_using_get_with_http_info(tenant_id, **kwargs)
        else:
            data = self.get_tenant_by_id_using_get_with_http_info(tenant_id, **kwargs)
            return data

    def get_tenant_by_id_using_get_with_http_info(self, tenant_id, **kwargs):
        """
        getTenantById
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True via dict unpacking:
        >>> thread = api.get_tenant_by_id_using_get_with_http_info(tenant_id, **{'async': True})
        >>> result = thread.get()

        :param async bool
        :param str tenant_id: tenantId (required)
        :return: Tenant
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['tenant_id']
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject unknown keyword arguments early so typos surface loudly.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_tenant_by_id_using_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'tenant_id' is set
        if ('tenant_id' not in params) or (params['tenant_id'] is None):
            raise ValueError("Missing the required parameter `tenant_id` when calling `get_tenant_by_id_using_get`")

        collection_formats = {}

        path_params = {}
        if 'tenant_id' in params:
            path_params['tenantId'] = params['tenant_id']

        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['*/*'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['X-Authorization']

        return self.api_client.call_api(
            '/api/tenant/{tenantId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Tenant',
            auth_settings=auth_settings,
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats,
            # 'async' is a reserved keyword in Python 3.7+; pass via ** unpacking.
            **{'async': params.get('async')})

    def get_tenants_using_get(self, limit, **kwargs):
        """
        getTenants
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True via dict unpacking:
        >>> thread = api.get_tenants_using_get(limit, **{'async': True})
        >>> result = thread.get()

        :param async bool
        :param str limit: limit (required)
        :param str text_search: textSearch
        :param str id_offset: idOffset
        :param str text_offset: textOffset
        :return: TextPageDataTenant
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return self.get_tenants_using_get_with_http_info(limit, **kwargs)
        else:
            data = self.get_tenants_using_get_with_http_info(limit, **kwargs)
            return data

    def get_tenants_using_get_with_http_info(self, limit, **kwargs):
        """
        getTenants
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True via dict unpacking:
        >>> thread = api.get_tenants_using_get_with_http_info(limit, **{'async': True})
        >>> result = thread.get()

        :param async bool
        :param str limit: limit (required)
        :param str text_search: textSearch
        :param str id_offset: idOffset
        :param str text_offset: textOffset
        :return: TextPageDataTenant
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['limit', 'text_search', 'id_offset', 'text_offset']
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject unknown keyword arguments early so typos surface loudly.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_tenants_using_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'limit' is set
        if ('limit' not in params) or (params['limit'] is None):
            raise ValueError("Missing the required parameter `limit` when calling `get_tenants_using_get`")

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'text_search' in params:
            query_params.append(('textSearch', params['text_search']))
        if 'id_offset' in params:
            query_params.append(('idOffset', params['id_offset']))
        if 'text_offset' in params:
            query_params.append(('textOffset', params['text_offset']))
        if 'limit' in params:
            query_params.append(('limit', params['limit']))

        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['*/*'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['X-Authorization']

        return self.api_client.call_api(
            '/api/tenants', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='TextPageDataTenant',
            auth_settings=auth_settings,
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats,
            # 'async' is a reserved keyword in Python 3.7+; pass via ** unpacking.
            **{'async': params.get('async')})

    def save_tenant_using_post(self, tenant, **kwargs):
        """
        saveTenant
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True via dict unpacking:
        >>> thread = api.save_tenant_using_post(tenant, **{'async': True})
        >>> result = thread.get()

        :param async bool
        :param Tenant tenant: tenant (required)
        :return: Tenant
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return self.save_tenant_using_post_with_http_info(tenant, **kwargs)
        else:
            data = self.save_tenant_using_post_with_http_info(tenant, **kwargs)
            return data

    def save_tenant_using_post_with_http_info(self, tenant, **kwargs):
        """
        saveTenant
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True via dict unpacking:
        >>> thread = api.save_tenant_using_post_with_http_info(tenant, **{'async': True})
        >>> result = thread.get()

        :param async bool
        :param Tenant tenant: tenant (required)
        :return: Tenant
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['tenant']
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject unknown keyword arguments early so typos surface loudly.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method save_tenant_using_post" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'tenant' is set
        if ('tenant' not in params) or (params['tenant'] is None):
            raise ValueError("Missing the required parameter `tenant` when calling `save_tenant_using_post`")

        collection_formats = {}

        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'tenant' in params:
            body_params = params['tenant']

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['*/*'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['X-Authorization']

        return self.api_client.call_api(
            '/api/tenant', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Tenant',
            auth_settings=auth_settings,
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats,
            # 'async' is a reserved keyword in Python 3.7+; pass via ** unpacking.
            **{'async': params.get('async')})
| 38.965358
| 149
| 0.556543
| 1,742
| 16,872
| 5.1062
| 0.097589
| 0.026981
| 0.025183
| 0.032378
| 0.892974
| 0.875773
| 0.8543
| 0.83294
| 0.824845
| 0.810343
| 0
| 0.000462
| 0.358938
| 16,872
| 432
| 150
| 39.055556
| 0.82193
| 0.031413
| 0
| 0.737778
| 0
| 0
| 0.158259
| 0.043263
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.026667
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5119e9e1bf0c97f24b4d156355297e70ff282020
| 18,504
|
py
|
Python
|
napalm_yang/models/openconfig/network_instances/network_instance/mpls/signaling_protocols/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 64
|
2016-10-20T15:47:18.000Z
|
2021-11-11T11:57:32.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/mpls/signaling_protocols/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 126
|
2016-10-05T10:36:14.000Z
|
2019-05-15T08:43:23.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/mpls/signaling_protocols/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 63
|
2016-11-07T15:23:08.000Z
|
2021-09-22T14:41:16.000Z
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import rsvp_te
from . import segment_routing
class signaling_protocols(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/signaling-protocols. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: top-level signaling protocol configuration
    """

    # NOTE: the double-underscore slot names are mangled by Python to
    # _signaling_protocols__rsvp_te / _signaling_protocols__segment_routing,
    # which is why all internal accesses use self.__rsvp_te etc.
    __slots__ = ("_path_helper", "_extmethods", "__rsvp_te", "__segment_routing")

    _yang_name = "signaling-protocols"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Build the two child containers with their pyangbind wiring; both
        # are config=True containers of the openconfig-network-instance module.
        self._path_helper = False
        self._extmethods = False
        self.__rsvp_te = YANGDynClass(
            base=rsvp_te.rsvp_te,
            is_container="container",
            yang_name="rsvp-te",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__segment_routing = YANGDynClass(
            base=segment_routing.segment_routing,
            is_container="container",
            yang_name="segment-routing",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        # Optional copy-constructor behavior: a single positional argument is
        # treated as a source object whose changed elements are copied in.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Delegate to the parent when attached to a tree; otherwise return
        # the static schema path of this container.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances", "network-instance", "mpls", "signaling-protocols"
            ]

    def _get_rsvp_te(self):
        """
        Getter method for rsvp_te, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te (container)

        YANG Description: RSVP-TE global signaling protocol configuration
        """
        return self.__rsvp_te

    def _set_rsvp_te(self, v, load=False):
        """
        Setter method for rsvp_te, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_rsvp_te is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_rsvp_te() directly.

        YANG Description: RSVP-TE global signaling protocol configuration
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap the supplied value; YANGDynClass validates the type.
            t = YANGDynClass(
                v,
                base=rsvp_te.rsvp_te,
                is_container="container",
                yang_name="rsvp-te",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """rsvp_te must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=rsvp_te.rsvp_te, is_container='container', yang_name="rsvp-te", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__rsvp_te = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_rsvp_te(self):
        # Reset the child container back to a fresh, default instance.
        self.__rsvp_te = YANGDynClass(
            base=rsvp_te.rsvp_te,
            is_container="container",
            yang_name="rsvp-te",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_segment_routing(self):
        """
        Getter method for segment_routing, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing (container)

        YANG Description: MPLS-specific Segment Routing configuration and operational state
        parameters
        """
        return self.__segment_routing

    def _set_segment_routing(self, v, load=False):
        """
        Setter method for segment_routing, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_segment_routing is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_segment_routing() directly.

        YANG Description: MPLS-specific Segment Routing configuration and operational state
        parameters
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap the supplied value; YANGDynClass validates the type.
            t = YANGDynClass(
                v,
                base=segment_routing.segment_routing,
                is_container="container",
                yang_name="segment-routing",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """segment_routing must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=segment_routing.segment_routing, is_container='container', yang_name="segment-routing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__segment_routing = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_segment_routing(self):
        # Reset the child container back to a fresh, default instance.
        self.__segment_routing = YANGDynClass(
            base=segment_routing.segment_routing,
            is_container="container",
            yang_name="segment-routing",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public properties exposing the private getters/setters; __builtin__ is
    # the builtins module aliased at module level for PY2/PY3 compatibility.
    rsvp_te = __builtin__.property(_get_rsvp_te, _set_rsvp_te)
    segment_routing = __builtin__.property(_get_segment_routing, _set_segment_routing)

    _pyangbind_elements = OrderedDict(
        [("rsvp_te", rsvp_te), ("segment_routing", segment_routing)]
    )
from . import rsvp_te
from . import segment_routing
class signaling_protocols(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/signaling-protocols. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: top-level signaling protocol configuration
    """

    # NOTE: this deliberately redefines the signaling_protocols name bound
    # above — pyangbind emits one class per defining YANG module
    # (openconfig-network-instance-l2 here) and the last binding wins.
    __slots__ = ("_path_helper", "_extmethods", "__rsvp_te", "__segment_routing")

    _yang_name = "signaling-protocols"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Build the two child containers with their pyangbind wiring.
        self._path_helper = False
        self._extmethods = False
        self.__rsvp_te = YANGDynClass(
            base=rsvp_te.rsvp_te,
            is_container="container",
            yang_name="rsvp-te",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__segment_routing = YANGDynClass(
            base=segment_routing.segment_routing,
            is_container="container",
            yang_name="segment-routing",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        # Optional copy-constructor behavior: a single positional argument is
        # treated as a source object whose changed elements are copied in.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Delegate to the parent when attached to a tree; otherwise return
        # the static schema path of this container.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances", "network-instance", "mpls", "signaling-protocols"
            ]

    def _get_rsvp_te(self):
        """
        Getter method for rsvp_te, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te (container)

        YANG Description: RSVP-TE global signaling protocol configuration
        """
        return self.__rsvp_te

    def _set_rsvp_te(self, v, load=False):
        """
        Setter method for rsvp_te, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_rsvp_te is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_rsvp_te() directly.

        YANG Description: RSVP-TE global signaling protocol configuration
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap the supplied value; YANGDynClass validates the type.
            t = YANGDynClass(
                v,
                base=rsvp_te.rsvp_te,
                is_container="container",
                yang_name="rsvp-te",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """rsvp_te must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=rsvp_te.rsvp_te, is_container='container', yang_name="rsvp-te", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__rsvp_te = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_rsvp_te(self):
        # Reset the child container back to a fresh, default instance.
        self.__rsvp_te = YANGDynClass(
            base=rsvp_te.rsvp_te,
            is_container="container",
            yang_name="rsvp-te",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_segment_routing(self):
        """
        Getter method for segment_routing, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing (container)

        YANG Description: MPLS-specific Segment Routing configuration and operational state
        parameters
        """
        return self.__segment_routing

    def _set_segment_routing(self, v, load=False):
        """
        Setter method for segment_routing, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_segment_routing is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_segment_routing() directly.

        YANG Description: MPLS-specific Segment Routing configuration and operational state
        parameters
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap the supplied value; YANGDynClass validates the type.
            t = YANGDynClass(
                v,
                base=segment_routing.segment_routing,
                is_container="container",
                yang_name="segment-routing",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """segment_routing must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=segment_routing.segment_routing, is_container='container', yang_name="segment-routing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__segment_routing = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_segment_routing(self):
        # Reset the child container back to a fresh, default instance.
        self.__segment_routing = YANGDynClass(
            base=segment_routing.segment_routing,
            is_container="container",
            yang_name="segment-routing",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public properties exposing the private getters/setters; __builtin__ is
    # the builtins module aliased at module level for PY2/PY3 compatibility.
    rsvp_te = __builtin__.property(_get_rsvp_te, _set_rsvp_te)
    segment_routing = __builtin__.property(_get_segment_routing, _set_segment_routing)

    _pyangbind_elements = OrderedDict(
        [("rsvp_te", rsvp_te), ("segment_routing", segment_routing)]
    )
| 39.708155
| 404
| 0.619758
| 1,976
| 18,504
| 5.544534
| 0.091093
| 0.038335
| 0.043447
| 0.050931
| 0.965772
| 0.945966
| 0.945966
| 0.945966
| 0.945966
| 0.945966
| 0
| 0.00137
| 0.289775
| 18,504
| 465
| 405
| 39.793548
| 0.832293
| 0.194066
| 0
| 0.866279
| 0
| 0.011628
| 0.24191
| 0.078149
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.055233
| 0
| 0.165698
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5140071a8ad46404186cee1d2f6c448428abfa12
| 2,417
|
py
|
Python
|
S4/S4 Library/simulation/server_commands/video_commands.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | 1
|
2021-05-20T19:33:37.000Z
|
2021-05-20T19:33:37.000Z
|
S4/S4 Library/simulation/server_commands/video_commands.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
S4/S4 Library/simulation/server_commands/video_commands.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
from objects.components import types
import objects
import services
import sims4.commands
@sims4.commands.Command('video.object_info')
def get_video_object_info(obj_id:int, _connection=None):
    """Report the video component of the given object, if any."""
    output = sims4.commands.output
    manager = services.object_manager()
    if obj_id not in manager:
        output('Object ID {} not present in the object manager.'.format(obj_id), _connection)
        return
    target = manager.get(obj_id)
    if target is None:
        return
    output('Object {} ({})'.format(obj_id, target.__class__.__name__), _connection)
    video_component = target.get_component(types.VIDEO_COMPONENT)
    if video_component is None:
        output(' Object does not have video playback capabilities.', _connection)
    else:
        output(' ' + repr(video_component), _connection)
@sims4.commands.Command('video.set_clips')
def set_video_clips(obj_id:int, *clip_names, _connection=None):
    """Replace the clip playlist of the object's video component.

    :param obj_id: id of the object in the object manager (required)
    :param clip_names: names of the clips the playlist is replaced with
    :param _connection: console connection used for command output
    """
    manager = services.object_manager()
    obj = None
    if obj_id in manager:
        obj = manager.get(obj_id)
    else:
        sims4.commands.output('Object ID {} not present in the object manager.'.format(obj_id), _connection)
    if obj is not None:
        sims4.commands.output('Object {} ({})'.format(obj_id, obj.__class__.__name__), _connection)
        v = obj.get_component(types.VIDEO_COMPONENT)
        if v is not None:
            v.set_video_clips(clip_names, False)
            # Bug fix: this command replaces the playlist (set_video_clips),
            # so report 'Set' — the previous 'Added' text was a copy-paste
            # from the video.add_clips command below.
            sims4.commands.output(' Set {} clip(s).'.format(len(clip_names)), _connection)
        else:
            sims4.commands.output(' Object does not have video playback capabilities.', _connection)
@sims4.commands.Command('video.add_clips')
def add_video_clips(obj_id:int, *clip_names, _connection=None):
    """Append clips to the clip playlist of the object's video component."""
    output = sims4.commands.output
    manager = services.object_manager()
    if obj_id not in manager:
        output('Object ID {} not present in the object manager.'.format(obj_id), _connection)
        return
    target = manager.get(obj_id)
    if target is None:
        return
    output('Object {} ({})'.format(obj_id, target.__class__.__name__), _connection)
    video_component = target.get_component(types.VIDEO_COMPONENT)
    if video_component is None:
        output(' Object does not have video playback capabilities.', _connection)
        return
    video_component.add_video_clips(clip_names, False)
    output(' Added {} clip(s).'.format(len(clip_names)), _connection)
| 43.945455
| 108
| 0.675217
| 320
| 2,417
| 4.840625
| 0.146875
| 0.13428
| 0.147192
| 0.145255
| 0.88315
| 0.88315
| 0.870884
| 0.870884
| 0.870884
| 0.870884
| 0
| 0.008381
| 0.210178
| 2,417
| 54
| 109
| 44.759259
| 0.803038
| 0
| 0
| 0.745098
| 0
| 0
| 0.17501
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.078431
| 0
| 0.137255
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5aead2bb7acb0edf5fb4880fe085e737776e4cf7
| 1,548
|
py
|
Python
|
runner/tests/run/json/test_json.py
|
PC-Trip/runner
|
e13291e25a2bc2962523a1de2d13725609497cb5
|
[
"MIT"
] | null | null | null |
runner/tests/run/json/test_json.py
|
PC-Trip/runner
|
e13291e25a2bc2962523a1de2d13725609497cb5
|
[
"MIT"
] | null | null | null |
runner/tests/run/json/test_json.py
|
PC-Trip/runner
|
e13291e25a2bc2962523a1de2d13725609497cb5
|
[
"MIT"
] | null | null | null |
import pytest


# Each test drives the indirect "run" fixture (defined in the suite's
# conftest) with one JSON scenario file and asserts a zero, i.e. successful,
# result. The scenarios cover sequential, threaded and process execution,
# with and without job queues, broadcast mode and worker pools.

@pytest.mark.parametrize("run", ["sequence.json"], indirect=True)
def test_sequence(run):
    assert run == 0


@pytest.mark.parametrize("run", ["thread.json"], indirect=True)
def test_thread(run):
    assert run == 0


@pytest.mark.parametrize("run", ["process.json"], indirect=True)
def test_process(run):
    assert run == 0


@pytest.mark.parametrize("run", ["thread_jobs.json"], indirect=True)
def test_thread_jobs(run):
    assert run == 0


@pytest.mark.parametrize("run", ["thread_jobs_broadcast.json"], indirect=True)
def test_thread_jobs_broadcast(run):
    assert run == 0


@pytest.mark.parametrize("run", ["thread_jobs_workers.json"], indirect=True)
def test_thread_jobs_workers(run):
    assert run == 0


@pytest.mark.parametrize("run", ["thread_jobs_broadcast_workers.json"], indirect=True)
def test_thread_jobs_broadcast_workers(run):
    assert run == 0


@pytest.mark.parametrize("run", ["process_jobs.json"], indirect=True)
def test_process_jobs(run):
    assert run == 0


@pytest.mark.parametrize("run", ["process_jobs_broadcast.json"], indirect=True)
def test_process_jobs_broadcast(run):
    assert run == 0


@pytest.mark.parametrize("run", ["process_jobs_workers.json"], indirect=True)
def test_process_jobs_workers(run):
    assert run == 0


@pytest.mark.parametrize("run", ["process_jobs_broadcast_workers.json"], indirect=True)
def test_process_jobs_broadcast_workers(run):
    assert run == 0


@pytest.mark.parametrize("run", ["action.json"], indirect=True)
def test_action(run):
    assert run == 0
| 24.967742
| 87
| 0.72739
| 214
| 1,548
| 5.056075
| 0.088785
| 0.110906
| 0.232902
| 0.266174
| 0.928835
| 0.886322
| 0.831793
| 0.777264
| 0.5878
| 0.5
| 0
| 0.008785
| 0.117571
| 1,548
| 61
| 88
| 25.377049
| 0.783309
| 0
| 0
| 0.324324
| 0
| 0
| 0.185401
| 0.110465
| 0
| 0
| 0
| 0
| 0.324324
| 1
| 0.324324
| false
| 0
| 0.027027
| 0
| 0.351351
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5175e60c7f1183ee1d14456eeb409ef841bebe39
| 10,182
|
py
|
Python
|
heat/tests/test_stack_lock.py
|
redhat-openstack/heat
|
6b9be0a868b857e942c1cc90594d0f3a0d0725d0
|
[
"Apache-2.0"
] | null | null | null |
heat/tests/test_stack_lock.py
|
redhat-openstack/heat
|
6b9be0a868b857e942c1cc90594d0f3a0d0725d0
|
[
"Apache-2.0"
] | null | null | null |
heat/tests/test_stack_lock.py
|
redhat-openstack/heat
|
6b9be0a868b857e942c1cc90594d0f3a0d0725d0
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo import messaging
from heat.common import exception
from heat.db import api as db_api
from heat.engine import stack_lock
from heat.tests.common import HeatTestCase
from heat.tests import utils
class StackLockTest(HeatTestCase):
    """Unit tests for stack_lock.StackLock acquire/steal/release behaviour.

    DB lock primitives (db_api.stack_lock_create / _steal / _release) are
    stubbed with mox (``self.m``, provided by HeatTestCase) for the acquire
    tests, and with plain ``mock.Mock`` for the thread_lock context-manager
    tests.
    """
    def setUp(self):
        # Fresh mock stack and engine id for every test.
        super(StackLockTest, self).setUp()
        self.context = utils.dummy_context()
        self.stack = self.m.CreateMockAnything()
        self.stack.id = "aae01f2d-52ae-47ac-8a0d-3fde3d220fea"
        self.stack.name = "test_stack"
        self.stack.action = "CREATE"
        self.engine_id = stack_lock.StackLock.generate_engine_id()
    class TestThreadLockException(Exception):
        # Sentinel raised inside thread_lock bodies to exercise the
        # error paths of the context managers.
        pass
    def test_successful_acquire_new_lock(self):
        """acquire() succeeds when no engine currently holds the lock."""
        self.m.StubOutWithMock(db_api, "stack_lock_create")
        db_api.stack_lock_create(
            self.stack.id, self.engine_id).AndReturn(None)
        self.m.ReplayAll()
        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        slock.acquire()
        self.m.VerifyAll()
    def test_failed_acquire_existing_lock_current_engine(self):
        """acquire() raises ActionInProgress when this engine already holds it."""
        self.m.StubOutWithMock(db_api, "stack_lock_create")
        db_api.stack_lock_create(
            self.stack.id, self.engine_id).AndReturn(self.engine_id)
        self.m.ReplayAll()
        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        self.assertRaises(exception.ActionInProgress, slock.acquire)
        self.m.VerifyAll()
    def test_successful_acquire_existing_lock_engine_dead(self):
        """A lock held by a dead engine is stolen and acquire() succeeds."""
        self.m.StubOutWithMock(db_api, "stack_lock_create")
        db_api.stack_lock_create(
            self.stack.id, self.engine_id).AndReturn("fake-engine-id")
        self.m.StubOutWithMock(db_api, "stack_lock_steal")
        db_api.stack_lock_steal(self.stack.id, "fake-engine-id",
                                self.engine_id).AndReturn(None)
        self.m.ReplayAll()
        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        self.patchobject(slock, 'engine_alive', return_value=False)
        slock.acquire()
        self.m.VerifyAll()
    def test_failed_acquire_existing_lock_engine_alive(self):
        """acquire() raises when another *live* engine holds the lock."""
        self.m.StubOutWithMock(db_api, "stack_lock_create")
        db_api.stack_lock_create(
            self.stack.id, self.engine_id).AndReturn("fake-engine-id")
        self.m.ReplayAll()
        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        self.patchobject(slock, 'engine_alive', return_value=True)
        self.assertRaises(exception.ActionInProgress, slock.acquire)
        self.m.VerifyAll()
    def test_failed_acquire_existing_lock_engine_dead(self):
        """acquire() raises when the steal loses to yet another engine."""
        self.m.StubOutWithMock(db_api, "stack_lock_create")
        db_api.stack_lock_create(
            self.stack.id, self.engine_id).AndReturn("fake-engine-id")
        self.m.StubOutWithMock(db_api, "stack_lock_steal")
        db_api.stack_lock_steal(
            self.stack.id, "fake-engine-id",
            self.engine_id).AndReturn("fake-engine-id2")
        self.m.ReplayAll()
        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        self.patchobject(slock, 'engine_alive', return_value=False)
        self.assertRaises(exception.ActionInProgress, slock.acquire)
        self.m.VerifyAll()
    def test_successful_acquire_with_retry(self):
        """After one failed steal, acquire() retries and succeeds."""
        self.m.StubOutWithMock(db_api, "stack_lock_create")
        db_api.stack_lock_create(
            self.stack.id, self.engine_id).AndReturn("fake-engine-id")
        self.m.StubOutWithMock(db_api, "stack_lock_steal")
        # First create/steal round fails (steal returns True), second succeeds.
        db_api.stack_lock_steal(
            self.stack.id, "fake-engine-id", self.engine_id).AndReturn(True)
        db_api.stack_lock_create(
            self.stack.id, self.engine_id).AndReturn("fake-engine-id")
        db_api.stack_lock_steal(
            self.stack.id, "fake-engine-id", self.engine_id).AndReturn(None)
        self.m.ReplayAll()
        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        self.patchobject(slock, 'engine_alive', return_value=False)
        slock.acquire()
        self.m.VerifyAll()
    def test_failed_acquire_one_retry_only(self):
        """acquire() gives up after a single retry if stealing keeps failing."""
        self.m.StubOutWithMock(db_api, "stack_lock_create")
        db_api.stack_lock_create(
            self.stack.id, self.engine_id).AndReturn("fake-engine-id")
        self.m.StubOutWithMock(db_api, "stack_lock_steal")
        # Both create/steal rounds fail; no third attempt is expected.
        db_api.stack_lock_steal(
            self.stack.id, "fake-engine-id", self.engine_id).AndReturn(True)
        db_api.stack_lock_create(
            self.stack.id, self.engine_id).AndReturn("fake-engine-id")
        db_api.stack_lock_steal(
            self.stack.id, "fake-engine-id", self.engine_id).AndReturn(True)
        self.m.ReplayAll()
        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        self.patchobject(slock, 'engine_alive', return_value=False)
        self.assertRaises(exception.ActionInProgress, slock.acquire)
        self.m.VerifyAll()
    def test_thread_lock_context_mgr_exception_acquire_success(self):
        """thread_lock releases the DB lock when the body raises."""
        db_api.stack_lock_create = mock.Mock(return_value=None)
        db_api.stack_lock_release = mock.Mock(return_value=None)
        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        def check_thread_lock():
            with slock.thread_lock(self.stack.id):
                self.assertEqual(1, db_api.stack_lock_create.call_count)
                raise self.TestThreadLockException
        self.assertRaises(self.TestThreadLockException, check_thread_lock)
        self.assertEqual(1, db_api.stack_lock_release.call_count)
    def test_thread_lock_context_mgr_exception_acquire_fail(self):
        """thread_lock does not release when the lock was never acquired."""
        # create returning our own engine_id means acquire fails.
        db_api.stack_lock_create = mock.Mock(return_value=self.engine_id)
        db_api.stack_lock_release = mock.Mock()
        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        def check_thread_lock():
            with slock.thread_lock(self.stack.id):
                self.assertEqual(1, db_api.stack_lock_create.call_count)
                raise exception.ActionInProgress
        self.assertRaises(exception.ActionInProgress, check_thread_lock)
        assert not db_api.stack_lock_release.called
    def test_thread_lock_context_mgr_no_exception(self):
        """thread_lock keeps the DB lock on a clean exit (no release)."""
        db_api.stack_lock_create = mock.Mock(return_value=None)
        db_api.stack_lock_release = mock.Mock(return_value=None)
        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        with slock.thread_lock(self.stack.id):
            self.assertEqual(1, db_api.stack_lock_create.call_count)
        assert not db_api.stack_lock_release.called
    def test_try_thread_lock_context_mgr_exception(self):
        """try_thread_lock releases the lock when the body raises."""
        db_api.stack_lock_create = mock.Mock(return_value=None)
        db_api.stack_lock_release = mock.Mock(return_value=None)
        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        def check_thread_lock():
            with slock.try_thread_lock(self.stack.id):
                self.assertEqual(1, db_api.stack_lock_create.call_count)
                raise self.TestThreadLockException
        self.assertRaises(self.TestThreadLockException, check_thread_lock)
        self.assertEqual(1, db_api.stack_lock_release.call_count)
    def test_try_thread_lock_context_mgr_no_exception(self):
        """try_thread_lock keeps the lock on a clean exit (no release)."""
        db_api.stack_lock_create = mock.Mock(return_value=None)
        db_api.stack_lock_release = mock.Mock(return_value=None)
        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        with slock.try_thread_lock(self.stack.id):
            self.assertEqual(1, db_api.stack_lock_create.call_count)
        assert not db_api.stack_lock_release.called
    def test_try_thread_lock_context_mgr_existing_lock(self):
        """try_thread_lock never releases a lock owned by another engine."""
        # create returning 1234 means someone else holds the lock.
        db_api.stack_lock_create = mock.Mock(return_value=1234)
        db_api.stack_lock_release = mock.Mock(return_value=None)
        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        def check_thread_lock():
            with slock.try_thread_lock(self.stack.id):
                self.assertEqual(1, db_api.stack_lock_create.call_count)
                raise self.TestThreadLockException
        self.assertRaises(self.TestThreadLockException, check_thread_lock)
        assert not db_api.stack_lock_release.called
    def test_engine_alive_ok(self):
        """engine_alive is True when the RPC 'listening' call answers."""
        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        mget_client = self.patchobject(stack_lock.rpc_messaging,
                                       'get_rpc_client')
        mclient = mget_client.return_value
        mclient_ctx = mclient.prepare.return_value
        mclient_ctx.call.return_value = True
        ret = slock.engine_alive(self.context, self.engine_id)
        self.assertTrue(ret)
        mclient.prepare.assert_called_once_with(timeout=2)
        mclient_ctx.call.assert_called_once_with(self.context, 'listening')
    def test_engine_alive_timeout(self):
        """engine_alive is False when the RPC 'listening' call times out."""
        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        mget_client = self.patchobject(stack_lock.rpc_messaging,
                                       'get_rpc_client')
        mclient = mget_client.return_value
        mclient_ctx = mclient.prepare.return_value
        mclient_ctx.call.side_effect = messaging.MessagingTimeout('too slow')
        ret = slock.engine_alive(self.context, self.engine_id)
        self.assertIs(False, ret)
        mclient.prepare.assert_called_once_with(timeout=2)
        mclient_ctx.call.assert_called_once_with(self.context, 'listening')
| 43.32766
| 78
| 0.698095
| 1,348
| 10,182
| 4.989614
| 0.119436
| 0.092328
| 0.074338
| 0.104074
| 0.822777
| 0.822777
| 0.821588
| 0.815343
| 0.804639
| 0.804639
| 0
| 0.004082
| 0.20605
| 10,182
| 234
| 79
| 43.512821
| 0.827932
| 0.053624
| 0
| 0.715909
| 0
| 0
| 0.056757
| 0.003742
| 0
| 0
| 0
| 0
| 0.147727
| 1
| 0.113636
| false
| 0.005682
| 0.039773
| 0
| 0.164773
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
51772a9f4c6eed51a4a91106905aeb0e434a8422
| 166
|
py
|
Python
|
accounts/views.py
|
meiordac/eCommerce
|
0efce9ebf5ecb55378890445b0bed16c07613121
|
[
"MIT"
] | 2
|
2017-01-13T12:39:18.000Z
|
2020-05-28T21:27:26.000Z
|
accounts/views.py
|
meiordac/eCommerce
|
0efce9ebf5ecb55378890445b0bed16c07613121
|
[
"MIT"
] | 1
|
2020-05-28T21:31:14.000Z
|
2020-05-28T21:31:14.000Z
|
accounts/views.py
|
meiordac/eCommerce
|
0efce9ebf5ecb55378890445b0bed16c07613121
|
[
"MIT"
] | 1
|
2017-10-16T08:30:59.000Z
|
2017-10-16T08:30:59.000Z
|
from django.shortcuts import render
def login(request):
    """Render the account login page."""
    template_name = 'login.html'
    return render(request, template_name)
def logout(request):
    """Render the account logout page."""
    template_name = 'logout.html'
    return render(request, template_name)
| 20.75
| 41
| 0.716867
| 21
| 166
| 5.666667
| 0.52381
| 0.218487
| 0.319328
| 0.436975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168675
| 166
| 8
| 42
| 20.75
| 0.862319
| 0
| 0
| 0
| 0
| 0
| 0.125749
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
51d956f2e81e99145eecf963abe0ebaf17acc9ae
| 18,174
|
py
|
Python
|
ajuste-bayes_mcmc.py
|
jpacuna99/AcunaJuan_ejercicio07
|
1a68387a25ed42aa31997a46b7d9b9d70481457f
|
[
"MIT"
] | null | null | null |
ajuste-bayes_mcmc.py
|
jpacuna99/AcunaJuan_ejercicio07
|
1a68387a25ed42aa31997a46b7d9b9d70481457f
|
[
"MIT"
] | null | null | null |
ajuste-bayes_mcmc.py
|
jpacuna99/AcunaJuan_ejercicio07
|
1a68387a25ed42aa31997a46b7d9b9d70481457f
|
[
"MIT"
] | null | null | null |
{
"cells": [
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[[ 0.20107669 0.29543058 -0.16135565 -0.26139101 0.21345801]]\n",
"[ 0.20107669 0.29543058 -0.16135565 -0.26139101 0.21345801]\n"
]
},
{
"ename": "ValueError",
"evalue": "operands could not be broadcast together with shapes (4,5) (4,) ",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-24-9f4a1b7ee149>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 100\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m5\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 101\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msubplot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 102\u001b[0;31m \u001b[0mbetas\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmetropolis\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 103\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhist\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbetas\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m20\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtitle\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mr\"$m_{}={:.2f}$ $c_{}={:.2f}$\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mm\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-24-9f4a1b7ee149>\u001b[0m in \u001b[0;36mmetropolis\u001b[0;34m(x)\u001b[0m\n\u001b[1;32m 69\u001b[0m \u001b[0mx_guess\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandom\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnormal\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx_walk\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0.1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandom\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnormal\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx_walk\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0.1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandom\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnormal\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx_walk\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0.1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandom\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnormal\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx_walk\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0.1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandom\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnormal\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx_walk\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0.1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 70\u001b[0m \u001b[0mprint\u001b[0m 
\u001b[0;34m(\u001b[0m\u001b[0mx_walk\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 71\u001b[0;31m \u001b[0ma\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mposterior\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmu\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0msigma\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mx_guess\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m/\u001b[0m\u001b[0mposterior\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmu\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0msigma\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mx_walk\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 72\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m>=\u001b[0m\u001b[0;36m1.\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 73\u001b[0m \u001b[0mx_walk\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvstack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx_walk\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mx_guess\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-24-9f4a1b7ee149>\u001b[0m in \u001b[0;36mposterior\u001b[0;34m(mu, x, sigma, a)\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0mPosterior\u001b[0m \u001b[0mcalculado\u001b[0m \u001b[0mcon\u001b[0m \u001b[0mla\u001b[0m \u001b[0mnormalizacion\u001b[0m \u001b[0madecuada\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 29\u001b[0m \"\"\"\n\u001b[0;32m---> 30\u001b[0;31m \u001b[0mpost\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlike\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msigma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmu\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mprior\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmu\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 31\u001b[0m \u001b[0mevidencia\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrapz\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpost\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmu\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mpost\u001b[0m\u001b[0;34m/\u001b[0m\u001b[0mevidencia\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-24-9f4a1b7ee149>\u001b[0m in \u001b[0;36mlike\u001b[0;34m(x, sigma, mu, a)\u001b[0m\n\u001b[1;32m 21\u001b[0m \"\"\"\n\u001b[1;32m 22\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 23\u001b[0;31m \u001b[0mL\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msum\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0my\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0my\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1.\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m/\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m2.\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 24\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mL\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mValueError\u001b[0m: operands could not be broadcast together with shapes (4,5) (4,) "
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAMsAAACGCAYAAABzPX6BAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAJF0lEQVR4nO3dX4wdZR3G8e8jFYiIsFBMiNLSxmIpaChsEEOiGLWUmhQSjLYJsTXVBgRM9ErDBabcoEZJSFBotAFM5O/VGiGkWpomhALbgBRqgLaithIpbOEGRFp+XsxbnR673d9u33POHvp8kpOdMzPved852Wdn5pzZ3ygiMLOJfaDfAzAbFA6LWZLDYpbksJglOSxmSQ6LWdKEYZG0TtKrkp4bZ7kk3Sppu6RnJZ3fWrZC0kvlsaLmwM16LbNnuRNYfJjllwHzymM18EsASacANwKfAS4EbpQ0dCSDNeunCcMSEZuAscOscjlwdzQ2AydLOh24FFgfEWMRsRdYz+FDZzat1Thn+Rjw99bzXWXeePPNBtKMfg8AQNJqmkM4TjjhhAvmz5/f5xHZ+9mWLVtei4jTJtuuRlh2A2e0nn+8zNsNXNIxf+OhXiAi1gJrAYaHh2N0dLTCsMwOTdJfp9KuxmHYCPCN8qnYRcCbEfEK8AiwSNJQObFfVOaZDaQJ9yyS7qHZQ8yUtIvmE64PAkTE7cBDwBJgO/AW8M2ybEzSTcBT5aXWRMThPigwm9YmDEtELJ9geQDXjrNsHbBuakMzm178Db5ZksNiluSwmCU5LGZJDotZksNiluSwmCU5LGZJDotZksNiluSwmCU5LGZJDotZksNiluSwmCU5LGZJqbBIWizphVJI7weHWH6LpGfK40VJb7SW7W8tG6k5eLNeyvxb8THAbcCXacoZPSVpJCK2HVgnIr7XWv96YGHrJd6OiPPqDdmsPzJ7lguB7RGxMyL+DdxLU1hvPMuBe2oMzmw6yYQlXSxP0mxgDrChNft4SaOSNku6YsojNeuz2kX2lgEPRsT+1rzZEbFb0lxgg6StEbGj3ahdZG/WrFmVh2RWR2bPMl4RvUNZRschWETsLj930hTZW9jZKCLWRsRwRAyfdtqkCwWa9UQmLE8B8yTNkXQsTSD+71MtSfOBIeDx1rwhSceV6ZnAxcC2zrZmgyBTN2yfpOtoqkkeA6yLiOclrQFGI+JAcJYB98bB9wo/G7hD0ns0wby5/Sma2SDRwb/b/edax9ZtkrZExPBk2/kbfLMkh8UsyWExS3JYzJIcFrMkh8UsyWExS3JYzJIcFrMkh8UsyWExS3JYzJIcFrMkh8UsyWExS3JYzJJqFdlbKWlPq5jet1rLVkh6qTxW1By8WS9VKbJX3BcR13W0PQW4ERgGAthS2u6tMnqzHupGkb22S4H1ETFWArIeWDy1oZr1V80ie1dKelbSg5IOlE5KtZW0uhTiG92zZ09y6Ga9VesE/3fAmRHxaZq9x12Taey6YTYIqhTZi4jXI+Kd8vRXwAXZtmaDokqRPUmnt54uBf5cph8BFpVie0PAojLPbODUKrL3XUlLgX3AGLCytB2TdBNN4ADWRMRYF7bDrOtcZM+OOi6yZ9ZlDotZksNiluSwmCU5LGZJDotZksNiluSwmCU5LGZJDotZksNiluSwmCU5LGZJDotZksNillSrbtj3JW0rBSv+KGl2a9n+Vj2xkc62ZoOiVt2wp4HhiHhL0jXAT4Cvl2VvR8R5lcdt1nNV6oZFxKMR8VZ5upmmMIXZ+0rNumEHrAIebj0/vtQE2yzpiimM0WxamPAwbDIkXUVTqvXzrdmzI2K3pLnABklbI2JHR7vVwGqAWbNm1RySWTVV6oYBSPoScAOwtFVDjIjYXX7uBDYCCzvbusieDYJadcMWAnfQBOXV1vwhSceV6ZnAxUBnQXGzgVCrbthPgQ8DD0gC+FtELAXOBu6Q9B5NMG
8+RPV9s4HgumF21HHdMLMuc1jMkhwWsySHxSzJYTFLcljMkhwWsySHxSzJYTFLcljMkhwWsySHxSzJYTFLcljMkhwWsySHxSypVpG94yTdV5Y/IenM1rIflvkvSLq03tDNemvCsLSK7F0GLACWS1rQsdoqYG9EfAK4BfhxabuA5n/2zwEWA78or2c2cKoU2SvP7yrTDwJfVPPP+JcD90bEOxHxF2B7eT2zgVOryN5/14mIfcCbwKnJtmYDoWqRvalqF9kD3pH0XJ+GMhN47Sjqt59993ObPzmVRpmwZIrsHVhnl6QZwEnA68m2RMRaYC2ApNGpVN6ooV99e5t73/dU2lUpsleeryjTXwU2RFNjaQRYVj4tmwPMA56cykDN+q1Wkb1fA7+RtB0YowkUZb37aapQ7gOujYj9XdoWs+6KiGn1AFYfbX17mwej72lXkdJsuvLlLmZJfQvLkVxC04O+x71HZjf7ba13paSQVOXToky/kr5Wtvl5Sb+t0W+mb0mzJD0q6enyfi+p1O86Sa+O9zWEGreWcT0r6fwJX7RPx4zHADuAucCxwJ+ABR3rfAe4vUwvA+7rYd9fAD5Upq+p0Xem37LeicAmmtsNDvdoe+fR3Bd0qDz/aA/f67XANWV6AfBypb4/B5wPPDfO8iU0d6gTcBHwxESv2a89y5FcQtP1vqM798jMbDPATTTX1v2rQp/Zfr8N3BYRewGidY+dHvQdwEfK9EnAP2p0HBGbaD6ZHc/lwN3R2AycLOn0w71mv8JyJJfQ9KLvts57ZHat33IocEZE/L5Cf+l+gbOAsyQ9Vu79ubiHff8IuErSLuAh4PpKfU9k0pdiTYvLXaarce6R2a2+PgD8HFjZ7b4OYQbNodglNHvRTZI+FRFv9KDv5cCdEfEzSZ+l+b7u3Ih4rwd9T0q/9iyTuYSGjktoetH3uPfI7GK/JwLnAhslvUxzHD1S4SQ/s727gJGIeDeaq8NfpAnPkcr0vQq4HyAiHgeOp7lurNtSvwcHqXEyNYWTrxnATmAO/zvxO6djnWs5+AT//h72vZDmxHReL7e5Y/2N1DnBz2zvYuCuMj2T5vDk1B71/TCwskyfTXPOokrv+ZmMf4L/FQ4+wX9ywter9cswhQ1ZQvMXbAdwQ5m3huYvOTR/YR6g+R+YJ4G5Pez7D8A/gWfKY6QX/XasWyUsye0VzSHgNmArsKyH7/UC4LESpGeARZX6vQd4BXiXZs+5CrgauLq1zbeVcW3NvNf+Bt8syd/gmyU5LGZJDotZksNiluSwmCU5LGZJDotZksNilvQfW3XWBOWLVHgAAAAASUVORK5CYII=\n",
"text/plain": [
"<Figure size 432x288 with 1 Axes>"
]
},
"metadata": {
"needs_background": "light"
},
"output_type": "display_data"
}
],
"source": [
"\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"\n",
"def prior(mu):\n",
" \"\"\"\n",
" Densidad de probabilidad de mu\n",
" \"\"\"\n",
" p = np.ones(len(mu))/(mu.max()-mu.min())\n",
" return p\n",
"\n",
"def like(x, sigma, mu,a):\n",
" \"\"\"\n",
" Likelihod de tener un dato x e incertidumbre sigma\n",
" \"\"\"\n",
" L = np.ones(len(mu))\n",
" \n",
" \"\"\"\n",
" for x_i in (x):\n",
" L *= (1.0/np.sqrt(2.0*np.pi*sigma**2))*np.exp(-0.5*(x_i-mu)**2/(sigma**2))\n",
" return L\n",
" \"\"\"\n",
" for i in range(len(x)):\n",
" L += np.sum((a[1:]*x[i]+a[0]-y[i])**2)**((len(y)*-1.)/-2.)\n",
" return L\n",
"\n",
"def posterior(mu, x, sigma,a):\n",
" \"\"\"\n",
" Posterior calculado con la normalizacion adecuada\n",
" \"\"\"\n",
" post = like(x, sigma, mu,a) * prior(mu)\n",
" evidencia = np.trapz(post, mu)\n",
" return post/evidencia\n",
"\n",
"def estimados(x,y,sigma,a):\n",
" w=2./sigma**2\n",
" alfa=w*np.sum(x**2)\n",
" beta=len(x)*w\n",
" gamma=np.sum(x)*w\n",
" p=np.sum(x*y)*w\n",
" q=np.sum(y)*w\n",
" m=(beta*p-gamma*q)/(alfa*beta-gamma**2)\n",
" c=(alfa*q-gamma*p)/(alfa*beta-gamma**2)\n",
" return m,c\n",
"\n",
"def maximo_incertidumbre(x, y):\n",
" deltax = x[1] - x[0]\n",
"\n",
" # maximo de y\n",
" ii = np.argmax(y)\n",
"\n",
" # segunda derivada\n",
" d = (y[ii+1] - 2*y[ii] + y[ii-1]) / (deltax**2)\n",
"\n",
" return x[ii], 1.0/np.sqrt(-d)\n",
" \n",
"def newsigma(a,b,x,y):\n",
" return np.sum((a*x+c-y)**2)**(2)/(len/(y)-1)\n",
"\n",
"x = np.linspace(-4.0,4.0,1000)\n",
"y = np.linspace(-4.0,4.0,1000)\n",
"\n",
"def metropolis(x):\n",
" \n",
" x_walk=np.empty([0,5])\n",
" x0=[(np.random.random()-0.5),(np.random.random()-0.5),(np.random.random()-0.5),(np.random.random()-0.5),(np.random.random()-0.5)]\n",
" x_walk=np.vstack((x_walk,x0))\n",
" \n",
" for i in range(20000):\n",
" x_guess=[np.random.normal(x_walk[i],0.1),np.random.normal(x_walk[i],0.1),np.random.normal(x_walk[i],0.1),np.random.normal(x_walk[i],0.1),np.random.normal(x_walk[i],0.1)]\n",
" print (x_walk[i])\n",
" \n",
" a=posterior(mu,x,sigma,x_guess)/posterior(mu,x,sigma,x_walk[i])\n",
" if a>=1.:\n",
" x_walk=np.vstack((x_walk,x_guess))\n",
" \n",
" else:\n",
" b=np.random.random()\n",
" if a>=b:\n",
" x_walk=np.vstack((x_walk,x_guess))\n",
" else:\n",
" x_walk=np.vstack((x_walk,x_walk[i]))\n",
" return x_walk()\n",
" \n",
" \n",
" \n",
" \n",
"\n",
"\n",
"\n",
"\n",
" \n",
"\n",
"data = np.loadtxt(\"notas_andes.dat\", skiprows=1)\n",
"Y = data[:,4]\n",
"X = data[:,:4]\n",
"mu = np.linspace(1E-4, 10.0, 1000)\n",
"sigma=0.1\n",
"\n",
"\n",
"plt.figure()\n",
"for i in range(5):\n",
" plt.subplot(2,2,i+1)\n",
" betas=metropolis(X)\n",
" plt.hist(betas[:,i],20)\n",
" plt.title(r\"$m_{}={:.2f}$ $c_{}={:.2f}$\".format(i+1,m,i+1,c))\n",
" plt.xlabel(\"x\")\n",
" \n",
"\n",
"plt.subplots_adjust(hspace=0.55)\n",
"plt.savefig(\"ajuste_bayes-mcmc.png\", bbox_inches='tight')\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"m1,c1=estimados(X[:,0],Y,sigma)\n",
"\n",
"print (m1,c1)\n",
"\n",
"\n",
"plt.figure()\n",
"for i in range(4):\n",
" plt.subplot(2,2,i+1)\n",
" xf=np.linspace(np.amin(X[:,i]),np.amax(X[:,i]),1000)\n",
" m,c=estimados(X[:,i],Y,sigma)\n",
" plt.scatter(X[:,i], Y)\n",
" plt.plot(xf,m*xf+c)\n",
" plt.title(r\"$m_{}={:.2f}$ $c_{}={:.2f}$\".format(i+1,m,i+1,c))\n",
" plt.xlabel(\"x\")\n",
" \n",
"\n",
"plt.subplots_adjust(hspace=0.55)\n",
"plt.savefig(\"bayes.png\", bbox_inches='tight')\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"\"\"\"\n",
"post = posterior(mu, X[:,0], sigma)\n",
"max, incertidumbre = maximo_incertidumbre(mu, np.log(post))\n",
"plt.figure()\n",
"plt.plot(mu, post)\n",
"plt.title('$\\mu$= {:.2f} $\\pm$ {:.2f}'.format(max, incertidumbre))\n",
"plt.xlabel('$\\mu$')\n",
"plt.ylabel('prob($\\mu$|datos)')\n",
"plt.savefig('mean.png')\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| 77.666667
| 3,344
| 0.671069
| 2,767
| 18,174
| 4.383809
| 0.127936
| 0.209481
| 0.329431
| 0.227205
| 0.559852
| 0.543446
| 0.534048
| 0.526628
| 0.509481
| 0.507007
| 0
| 0.259407
| 0.115275
| 18,174
| 233
| 3,345
| 78
| 0.494993
| 0
| 0
| 0.381974
| 0
| 0.060086
| 0.739353
| 0.564103
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.008584
| 0
| 0.008584
| 0.012876
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
51efc0d579439f913806f46824ac9f38aeda1fb9
| 472
|
py
|
Python
|
narwhallet/core/kcl/bip_utils/bip44/__init__.py
|
Snider/narwhallet
|
0d528763c735f1e68b8264e302854d41e7cf1956
|
[
"MIT"
] | 3
|
2021-12-29T11:25:13.000Z
|
2022-01-16T13:57:17.000Z
|
narwhallet/core/kcl/bip_utils/bip44/__init__.py
|
Snider/narwhallet
|
0d528763c735f1e68b8264e302854d41e7cf1956
|
[
"MIT"
] | null | null | null |
narwhallet/core/kcl/bip_utils/bip44/__init__.py
|
Snider/narwhallet
|
0d528763c735f1e68b8264e302854d41e7cf1956
|
[
"MIT"
] | 1
|
2022-01-16T13:57:20.000Z
|
2022-01-16T13:57:20.000Z
|
from narwhallet.core.kcl.bip_utils.bip44.bip44_base_ex import Bip44DepthError, Bip44CoinNotAllowedError
from narwhallet.core.kcl.bip_utils.bip44.bip44_base import Bip44Changes, Bip44Coins, Bip44Levels
from narwhallet.core.kcl.bip_utils.bip44.bip44_keys import Bip44PublicKey, Bip44PrivateKey
from narwhallet.core.kcl.bip_utils.bip44.bip44 import Bip44
from narwhallet.core.kcl.bip_utils.bip44.bip49 import Bip49
from narwhallet.core.kcl.bip_utils.bip44.bip84 import Bip84
| 67.428571
| 103
| 0.868644
| 68
| 472
| 5.882353
| 0.294118
| 0.21
| 0.27
| 0.315
| 0.58
| 0.58
| 0.58
| 0.41
| 0.215
| 0
| 0
| 0.099099
| 0.059322
| 472
| 6
| 104
| 78.666667
| 0.801802
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5c508fac9b57431d3270a882b9e47454f2cdd279
| 3,937
|
py
|
Python
|
resources/test_cases/python/cryptography/TestRule5.py
|
stg-tud/licma
|
b899e6e682f7716d19e79d6ce7b73c28c6efd4cf
|
[
"MIT"
] | 5
|
2021-09-13T11:24:13.000Z
|
2022-03-18T21:56:58.000Z
|
resources/test_cases/python/cryptography/TestRule5.py
|
stg-tud/licma
|
b899e6e682f7716d19e79d6ce7b73c28c6efd4cf
|
[
"MIT"
] | null | null | null |
resources/test_cases/python/cryptography/TestRule5.py
|
stg-tud/licma
|
b899e6e682f7716d19e79d6ce7b73c28c6efd4cf
|
[
"MIT"
] | 1
|
2021-09-13T06:02:20.000Z
|
2021-09-13T06:02:20.000Z
|
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
# Backend instance shared by every example function in this module.
g_backend = default_backend()
# Module-level PBKDF2 iteration count, deliberately low (999) — read by the
# example that accesses a global; presumably just under the analyzer's
# threshold (confirm against the rule definition).
g_count = 999
def p_example1_hard_coded(password, data):
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=b"12345678", iterations=999, backend=g_backend)
key = kdf.derive(password)
cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=g_backend)
encryptor = cipher.encryptor()
cipher_text = encryptor.update(data) + encryptor.finalize()
return cipher_text
def p_example2_local_variable(password, data):
count = 999
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=b"12345678", iterations=count, backend=g_backend)
key = kdf.derive(password)
cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=g_backend)
encryptor = cipher.encryptor()
cipher_text = encryptor.update(data) + encryptor.finalize()
return cipher_text
def p_example3_nested_local_variable(password, data):
count1 = 999
count2 = count1
count3 = count2
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=b"12345678", iterations=count3, backend=g_backend)
key = kdf.derive(password)
cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=g_backend)
encryptor = cipher.encryptor()
cipher_text = encryptor.update(data) + encryptor.finalize()
return cipher_text
def p_example_method_call(password, count, data):
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=b"12345678", iterations=count, backend=g_backend)
key = kdf.derive(password)
cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=g_backend)
encryptor = cipher.encryptor()
cipher_text = encryptor.update(data) + encryptor.finalize()
return cipher_text
def p_example_nested_method_call(password, count, data):
return p_example_method_call(password, count, data)
def p_example4_direct_method_call(password, data):
count = 999
return p_example_method_call(password, count, data)
def p_example5_nested_method_call(password, data):
count = 999
return p_example_nested_method_call(password, count, data)
def p_example6_direct_g_variable_access(password, data):
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=b"12345678", iterations=g_count, backend=g_backend)
key = kdf.derive(password)
cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=g_backend)
encryptor = cipher.encryptor()
cipher_text = encryptor.update(data) + encryptor.finalize()
return cipher_text
def p_example7_indirect_g_variable_access(password, data):
count = g_count
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=b"12345678", iterations=count, backend=g_backend)
key = kdf.derive(password)
cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=g_backend)
encryptor = cipher.encryptor()
cipher_text = encryptor.update(data) + encryptor.finalize()
return cipher_text
def p_example8_warning_parameter_not_resolvable(password, count, data):
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=b"12345678", iterations=count, backend=g_backend)
key = kdf.derive(password)
cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=g_backend)
encryptor = cipher.encryptor()
cipher_text = encryptor.update(data) + encryptor.finalize()
return cipher_text
def n_example1_iterations_eq_1000(password, data):
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=16, salt=b"12345678", iterations=1000, backend=g_backend)
key = kdf.derive(password)
cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=g_backend)
encryptor = cipher.encryptor()
cipher_text = encryptor.update(data) + encryptor.finalize()
return cipher_text
| 36.453704
| 115
| 0.745999
| 505
| 3,937
| 5.633663
| 0.136634
| 0.050615
| 0.089631
| 0.078735
| 0.814763
| 0.8
| 0.8
| 0.795079
| 0.77188
| 0.74587
| 0
| 0.045858
| 0.141478
| 3,937
| 107
| 116
| 36.794393
| 0.795858
| 0
| 0
| 0.653333
| 0
| 0
| 0.016256
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146667
| false
| 0.293333
| 0.053333
| 0.013333
| 0.346667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
5c8fc229e2d0019f171db64ae70c454b317fe8d2
| 125
|
py
|
Python
|
utils.py
|
alan-toledo/Python-Unit-Testing
|
46acc2478faa2ed2a5932e54ad04c0cf57d62994
|
[
"MIT"
] | null | null | null |
utils.py
|
alan-toledo/Python-Unit-Testing
|
46acc2478faa2ed2a5932e54ad04c0cf57d62994
|
[
"MIT"
] | null | null | null |
utils.py
|
alan-toledo/Python-Unit-Testing
|
46acc2478faa2ed2a5932e54ad04c0cf57d62994
|
[
"MIT"
] | null | null | null |
def get_max(lst):
return max(lst)
def get_min(lst):
return min(lst)
def get_avg(lst):
return sum(lst)/len(lst)
| 13.888889
| 28
| 0.648
| 23
| 125
| 3.391304
| 0.391304
| 0.230769
| 0.230769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208
| 125
| 8
| 29
| 15.625
| 0.787879
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
5ccc75b28d5f875889435a942aaa0332ca4aa288
| 16,914
|
py
|
Python
|
tests/unit/test_FlowEntryManager.py
|
aristanetworks/DirectFlowAssist
|
16c594cb42edd8aa084c6dbb931c87bbdff81ed0
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_FlowEntryManager.py
|
aristanetworks/DirectFlowAssist
|
16c594cb42edd8aa084c6dbb931c87bbdff81ed0
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_FlowEntryManager.py
|
aristanetworks/DirectFlowAssist
|
16c594cb42edd8aa084c6dbb931c87bbdff81ed0
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2015 Arista Networks, Inc. All rights reserved.
# Arista Networks, Inc. Confidential and Proprietary.
#
# pylint: disable = line-too-long
import sys
# sys.path.extend(['../..','../../persist_common', '../../persist_pan'])
import unittest
import logging
import config
from mock import Mock, patch
from directflow_assist import FlowEntryManager
TCAM_STATS = {'num_avail': 1000,
'num_used': 500,
'pct_used': 50}
ACTIVE_FLOWS_1 = [
{ "priority": 40,
"matchPackets": 20,
"matchBytes": 0,
"bridgeMacAddr": "00:1c:73:74:81:9e",
"name": "BYPASS_FW_ping_ICMP_172-22-28-42_172-22-225-127_May26_11:19:20_RSP",
"action": {
"outputNormal": False,
"outputLocal": False,
"ipTos": 8,
"loopback": False,
"outInterfaces": [
"Port-Channel10"
],
"vlanPCP": 3,
"egrMirrorInterfaces": [],
"outputAll": False,
"outputController": False,
"outputDrop": False,
"outputFlood": False,
"ingrMirrorInterfaces": []
},
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"match": {
"inInterfaces": [
"Port-Channel20"
],
"unknownL3V4MulticastAddress": False,
"ethType": 2048,
"ethTypeMask": 65535,
"tcpSyn": False,
"ipSrc": {
"mask": "255.255.255.255",
"ip": "172.22.28.42"
},
"tcpPsh": False,
"tcpUrg": False,
"tcpFin": False,
"tcpRst": False,
"ipProto": 1,
"unknownL2V4MulticastAddress": False,
"tcpAck": False,
"ipDst": {
"mask": "255.255.255.255",
"ip": "172.22.225.127"
}
}
},
{
"priority": 40,
"matchPackets": 10,
"matchBytes": 0,
"bridgeMacAddr": "00:1c:73:74:81:9e",
"name": "BYPASS_FW_ping_ICMP_172-22-225-127_172-22-28-42_May26_11:19:20_INI",
"action": {
"outputNormal": False,
"outputLocal": False,
"ipTos": 8,
"loopback": False,
"outInterfaces": [
"Port-Channel20"
],
"vlanPCP": 3,
"egrMirrorInterfaces": [],
"outputAll": False,
"outputController": False,
"outputDrop": False,
"outputFlood": False,
"ingrMirrorInterfaces": []
},
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"match": {
"inInterfaces": [
"Port-Channel10"
],
"unknownL3V4MulticastAddress": False,
"ethType": 2048,
"ethTypeMask": 65535,
"tcpSyn": False,
"ipSrc": {
"mask": "255.255.255.255",
"ip": "172.22.225.127"
},
"tcpPsh": False,
"tcpUrg": False,
"tcpFin": False,
"tcpRst": False,
"ipProto": 1,
"unknownL2V4MulticastAddress": False,
"tcpAck": False,
"ipDst": {
"mask": "255.255.255.255",
"ip": "172.22.28.42"
}
}
}]
ACTIVE_FLOWS_2 = [
{ "priority": 40,
"matchPackets": 620,
"matchBytes": 0,
"bridgeMacAddr": "00:1c:73:74:81:9e",
"name": "BYPASS_FW_ping_ICMP_172-22-28-42_172-22-225-127_May26_11:19:20_RSP",
"action": {
"outputNormal": False,
"outputLocal": False,
"ipTos": 8,
"loopback": False,
"outInterfaces": [
"Port-Channel10"
],
"vlanPCP": 3,
"egrMirrorInterfaces": [],
"outputAll": False,
"outputController": False,
"outputDrop": False,
"outputFlood": False,
"ingrMirrorInterfaces": []
},
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"match": {
"inInterfaces": [
"Port-Channel20"
],
"unknownL3V4MulticastAddress": False,
"ethType": 2048,
"ethTypeMask": 65535,
"tcpSyn": False,
"ipSrc": {
"mask": "255.255.255.255",
"ip": "172.22.28.42"
},
"tcpPsh": False,
"tcpUrg": False,
"tcpFin": False,
"tcpRst": False,
"ipProto": 1,
"unknownL2V4MulticastAddress": False,
"tcpAck": False,
"ipDst": {
"mask": "255.255.255.255",
"ip": "172.22.225.127"
}
}
},
{
"priority": 40,
"matchPackets": 310,
"matchBytes": 0,
"bridgeMacAddr": "00:1c:73:74:81:9e",
"name": "BYPASS_FW_ping_ICMP_172-22-225-127_172-22-28-42_May26_11:19:20_INI",
"action": {
"outputNormal": False,
"outputLocal": False,
"ipTos": 8,
"loopback": False,
"outInterfaces": [
"Port-Channel20"
],
"vlanPCP": 3,
"egrMirrorInterfaces": [],
"outputAll": False,
"outputController": False,
"outputDrop": False,
"outputFlood": False,
"ingrMirrorInterfaces": []
},
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"match": {
"inInterfaces": [
"Port-Channel10"
],
"unknownL3V4MulticastAddress": False,
"ethType": 2048,
"ethTypeMask": 65535,
"tcpSyn": False,
"ipSrc": {
"mask": "255.255.255.255",
"ip": "172.22.225.127"
},
"tcpPsh": False,
"tcpUrg": False,
"tcpFin": False,
"tcpRst": False,
"ipProto": 1,
"unknownL2V4MulticastAddress": False,
"tcpAck": False,
"ipDst": {
"mask": "255.255.255.255",
"ip": "172.22.28.42"
}
}
}]
ACTIVE_FLOWS_3 = [
{"name": "BYPASS_FW_1_INI",
"priority": 40,
"matchPackets": 0,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel10"]},
"match": {"inInterfaces": ["Port-Channel20"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.1"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.2"}}},
{"name": "BYPASS_FW_1_RSP",
"priority": 40,
"matchPackets": 0,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel20"]},
"match": {"inInterfaces": ["Port-Channel10"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.2"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.1"}}},
{"name": "BYPASS_FW_2_INI",
"priority": 40,
"matchPackets": 0,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel10"]},
"match": {"inInterfaces": ["Port-Channel20"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.3"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.4"}}},
{"name": "BYPASS_FW_2_RSP",
"priority": 40,
"matchPackets": 0,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel20"]},
"match": {"inInterfaces": ["Port-Channel10"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.4"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.3"}}},
{"name": "BYPASS_FW_3_INI",
"priority": 40,
"matchPackets": 0,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel10"]},
"match": {"inInterfaces": ["Port-Channel20"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.5"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.6"}}},
{"name": "BYPASS_FW_3_RSP",
"priority": 40,
"matchPackets": 0,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel20"]},
"match": {"inInterfaces": ["Port-Channel10"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.6"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.5"}}},
{"name": "BYPASS_FW_4_INI",
"priority": 40,
"matchPackets": 0,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel10"]},
"match": {"inInterfaces": ["Port-Channel20"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.7"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.8"}}},
{"name": "BYPASS_FW_4_RSP",
"priority": 40,
"matchPackets": 0,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel20"]},
"match": {"inInterfaces": ["Port-Channel10"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.8"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.7"}}},
{"name": "BYPASS_FW_5_INI",
"priority": 40,
"matchPackets": 0,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel10"]},
"match": {"inInterfaces": ["Port-Channel20"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.9"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.10"}}},
{"name": "BYPASS_FW_5_RSP",
"priority": 40,
"matchPackets": 0,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel20"]},
"match": {"inInterfaces": ["Port-Channel10"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.10"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.9"}}},
]
ACTIVE_FLOWS_4 = [
{"name": "BYPASS_FW_1_INI",
"priority": 40,
"matchPackets": 6000,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel10"]},
"match": {"inInterfaces": ["Port-Channel20"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.1"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.2"}}},
{"name": "BYPASS_FW_1_RSP",
"priority": 40,
"matchPackets": 6000,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel20"]},
"match": {"inInterfaces": ["Port-Channel10"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.2"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.1"}}},
{"name": "BYPASS_FW_2_INI",
"priority": 40,
"matchPackets": 3000,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel10"]},
"match": {"inInterfaces": ["Port-Channel20"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.3"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.4"}}},
{"name": "BYPASS_FW_2_RSP",
"priority": 40,
"matchPackets": 3000,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel20"]},
"match": {"inInterfaces": ["Port-Channel10"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.4"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.3"}}},
{"name": "BYPASS_FW_3_INI",
"priority": 40,
"matchPackets": 600,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel10"]},
"match": {"inInterfaces": ["Port-Channel20"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.5"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.6"}}},
{"name": "BYPASS_FW_3_RSP",
"priority": 40,
"matchPackets": 600,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel20"]},
"match": {"inInterfaces": ["Port-Channel10"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.6"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.5"}}},
{"name": "BYPASS_FW_4_INI",
"priority": 40,
"matchPackets": 300,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel10"]},
"match": {"inInterfaces": ["Port-Channel20"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.7"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.8"}}},
{"name": "BYPASS_FW_4_RSP",
"priority": 40,
"matchPackets": 300,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel20"]},
"match": {"inInterfaces": ["Port-Channel10"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.8"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.7"}}},
{"name": "BYPASS_FW_5_INI",
"priority": 40,
"matchPackets": 0,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel10"]},
"match": {"inInterfaces": ["Port-Channel20"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.9"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.10"}}},
{"name": "BYPASS_FW_5_RSP",
"priority": 40,
"matchPackets": 0,
"hardTimeout": 600,
"idleTimeout": 300,
"persistent": False,
"action": {"outInterfaces": ["Port-Channel20"]},
"match": {"inInterfaces": ["Port-Channel10"],
"ipSrc": {"mask": "255.255.255.255", "ip": "1.1.1.10"},
"ipProto": 6,
"ipDst": {"mask": "255.255.255.255", "ip": "1.1.1.9"}}},
]
DBG1= False
class TestFlowEntryManager(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_update_flow_rates_cache_and_rate_calcs(self):
fem = FlowEntryManager.FlowEntryMgr()
fem.directflow_switch.get_active_flows = Mock(return_value=ACTIVE_FLOWS_1)
fem.update_flow_rates_cache()
fem.directflow_switch.get_active_flows.assert_called_with()
key = 'ICMP_172.22.225.127:_172.22.28.42:'
self.assertTrue(key in fem.flow_rates_cache)
cache_entry = fem.flow_rates_cache[key]
self.assertTrue(cache_entry.is_current)
self.assertTrue(cache_entry.is_bypass)
self.assertEqual(cache_entry.rate, -1)
flow_ini_key = 'BYPASS_FW_ping_ICMP_172-22-225-127_172-22-28-42_May26_11:19:20_INI'
flow_rsp_key = 'BYPASS_FW_ping_ICMP_172-22-28-42_172-22-225-127_May26_11:19:20_RSP'
self.assertTrue(flow_ini_key in cache_entry.flows)
self.assertTrue(flow_rsp_key in cache_entry.flows)
fem.directflow_switch.get_active_flows = Mock(return_value=ACTIVE_FLOWS_2)
fem.update_flow_rates_cache()
self.assertEqual(cache_entry.rate, 7)
def test_reap_least_active_flows(self):
fem = FlowEntryManager.FlowEntryMgr()
fem.directflow_switch.get_active_flows = Mock(return_value=ACTIVE_FLOWS_3)
fem.update_flow_rates_cache()
if DBG1:
for k,v in fem.flow_rates_cache.items():
print 'A***flow_rates_cache: %s %s' % (k,v)
config.TCAM_REAP_THRESHOLD_PCT = 50
config.TCAM_REAP_LEAST_ACTIVE_PCT = 40
tcam_stats = {'num_avail': 20, 'num_used': 10, 'pct_used': 50}
fem.directflow_switch.tcam_directflow_utilization = Mock(return_value=tcam_stats)
fem.directflow_switch.get_active_flows = Mock(return_value=ACTIVE_FLOWS_4)
fem.directflow_switch.delete_flows = Mock()
fem.reap_least_active_flows(tcam_stats)
if DBG1:
print ('TCAM_REAP_THRESHOaLD_PCT: %d, TCAM_REAP_LEAST_ACTIVE_PCT: %d'
%(config.TCAM_REAP_THRESHOLD_PCT, config.TCAM_REAP_LEAST_ACTIVE_PCT))
for k,v in fem.flow_rates_cache.items():
print 'B***flow_rates_cache: %s %s' % (k,v)
# self.assertTrue(False) # force buffer dump (unittest -b option)
least_active = ['BYPASS_FW_5_INI', 'BYPASS_FW_5_RSP',
'BYPASS_FW_4_INI', 'BYPASS_FW_4_RSP']
fem.directflow_switch.delete_flows.assert_called_with(least_active)
if __name__ == '__main__':
unittest.main()
| 32.906615
| 91
| 0.523826
| 1,813
| 16,914
| 4.740761
| 0.100386
| 0.100524
| 0.100524
| 0.0726
| 0.881326
| 0.838045
| 0.827691
| 0.822804
| 0.81943
| 0.816638
| 0
| 0.126017
| 0.2803
| 16,914
| 513
| 92
| 32.97076
| 0.580054
| 0.016613
| 0
| 0.841996
| 0
| 0
| 0.40616
| 0.044514
| 0
| 0
| 0
| 0
| 0.018711
| 0
| null | null | 0.064449
| 0.012474
| null | null | 0.006237
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
7a3adbaad7e28ae93b1ec2a400dbd5d1bee5d910
| 992
|
py
|
Python
|
L1TriggerConfig/L1GtConfigProducers/python/L1GtConfig_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
L1TriggerConfig/L1GtConfigProducers/python/L1GtConfig_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
L1TriggerConfig/L1GtConfigProducers/python/L1GtConfig_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
# cff file grouping all the L1 GT parameters
from L1TriggerConfig.L1GtConfigProducers.L1GtStableParametersConfig_cff import *
from L1TriggerConfig.L1GtConfigProducers.L1GtParametersConfig_cff import *
#
from L1TriggerConfig.L1GtConfigProducers.L1GtPrescaleFactorsAlgoTrigConfig_cff import *
from L1TriggerConfig.L1GtConfigProducers.L1GtPrescaleFactorsTechTrigConfig_cff import *
from L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskAlgoTrigConfig_cff import *
from L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff import *
from L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskVetoAlgoTrigConfig_cff import *
from L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskVetoTechTrigConfig_cff import *
from L1TriggerConfig.L1GtConfigProducers.L1GtBoardMapsConfig_cff import *
from L1TriggerConfig.L1GtConfigProducers.L1GtPsbSetupConfig_cff import *
from L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMenuConfig_cff import *
| 49.6
| 87
| 0.899194
| 80
| 992
| 11.0125
| 0.35
| 0.23723
| 0.474461
| 0.317821
| 0.533485
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036598
| 0.063508
| 992
| 19
| 88
| 52.210526
| 0.911733
| 0.042339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7a44161420fbacd17e57d54a09570c3816d91bdb
| 13,632
|
py
|
Python
|
PhysicsTools/PatAlgos/python/triggerLayer1/triggerMatcherExamples_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
PhysicsTools/PatAlgos/python/triggerLayer1/triggerMatcherExamples_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
PhysicsTools/PatAlgos/python/triggerLayer1/triggerMatcherExamples_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
# Examples for configurations of the trigger match for various physics objects
#
# A detailed description is given in
# https://twiki.cern.ch/twiki/bin/view/CMS/SWGuidePATTrigger#PATTriggerMatcher
# Cuts on the parameters
# - 'maxDPtRel' and
# - 'maxDeltaR'
# are NOT tuned (using old values from TQAF MC match, January 2008)
## Example matches ##
# Matches to firing trigger objects of the single-muon path 'HLT_Mu17'.
somePatMuonTriggerMatchHLTMu17 = cms.EDProducer(
    "PATTriggerMatcherDRDPtLessByR",  # match by DeltaR only, best match by DeltaR
    src = cms.InputTag("selectedPatMuons"),
    # Default producer label as defined in
    # PhysicsTools/PatAlgos/python/triggerLayer1/triggerProducer_cfi.py.
    matched = cms.InputTag("patTrigger"),
    matchedCuts = cms.string('path( "HLT_Mu17_v*" )'),
    # Cut values are NOT tuned (old TQAF MC match values, January 2008).
    maxDPtRel = cms.double(0.5),
    maxDeltaR = cms.double(0.5),
    resolveAmbiguities    = cms.bool(True),  # only one match per trigger object
    resolveByMatchQuality = cms.bool(True),  # best match per reco object: by DeltaR
)

# Matches to firing trigger objects of the double-muon path 'HLT_DoubleMu5_IsoMu5'.
somePatMuonTriggerMatchHLTDoubleMu5IsoMu5 = cms.EDProducer(
    "PATTriggerMatcherDRDPtLessByR",  # match by DeltaR only, best match by DeltaR
    src = cms.InputTag("selectedPatMuons"),
    matched = cms.InputTag("patTrigger"),
    matchedCuts = cms.string('path( "HLT_DoubleMu5_IsoMu5_v*" )'),
    maxDPtRel = cms.double(0.5),
    maxDeltaR = cms.double(0.5),
    resolveAmbiguities    = cms.bool(True),  # only one match per trigger object
    resolveByMatchQuality = cms.bool(True),  # best match per reco object: by DeltaR
)
# Matches to firing trigger objects of the di-photon path 'HLT_Photon26_Photon18'.
somePatPhotonTriggerMatchHLTPhoton26Photon18 = cms.EDProducer(
    "PATTriggerMatcherDRDPtLessByR",  # match by DeltaR only, best match by DeltaR
    src = cms.InputTag("selectedPatPhotons"),
    # Default producer label as defined in
    # PhysicsTools/PatAlgos/python/triggerLayer1/triggerProducer_cfi.py.
    matched = cms.InputTag("patTrigger"),
    matchedCuts = cms.string('path( "HLT_Photon26_Photon18_v*" )'),
    # Cut values are NOT tuned (old TQAF MC match values, January 2008).
    maxDPtRel = cms.double(0.5),
    maxDeltaR = cms.double(0.5),
    resolveAmbiguities    = cms.bool(True),  # only one match per trigger object
    resolveByMatchQuality = cms.bool(True),  # best match per reco object: by DeltaR
)

# Matches to firing trigger objects of the single-electron path
# 'HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL'.
somePatElectronTriggerMatchHLTEle17CaloIdTCaloIsoVLTrkIdVLTrkIsoVL = cms.EDProducer(
    "PATTriggerMatcherDRDPtLessByR",  # match by DeltaR only, best match by DeltaR
    src = cms.InputTag("selectedPatElectrons"),
    matched = cms.InputTag("patTrigger"),
    matchedCuts = cms.string('path( "HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v*" )'),
    maxDPtRel = cms.double(0.5),
    maxDeltaR = cms.double(0.5),
    resolveAmbiguities    = cms.bool(True),  # only one match per trigger object
    resolveByMatchQuality = cms.bool(True),  # best match per reco object: by DeltaR
)
# Matches to firing trigger objects of the di-tau path
# 'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1'.
somePatTauTriggerMatchHLTDoubleMediumIsoPFTau30Trk1eta2p1 = cms.EDProducer(
    "PATTriggerMatcherDRDPtLessByR",  # match by DeltaR only, best match by DeltaR
    src = cms.InputTag("selectedPatTaus"),
    # Default producer label as defined in
    # PhysicsTools/PatAlgos/python/triggerLayer1/triggerProducer_cfi.py.
    matched = cms.InputTag("patTrigger"),
    matchedCuts = cms.string('path( "HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_v*" )'),
    # Cut values are NOT tuned (old TQAF MC match values, January 2008).
    maxDPtRel = cms.double(0.5),
    maxDeltaR = cms.double(0.5),
    resolveAmbiguities    = cms.bool(True),  # only one match per trigger object
    resolveByMatchQuality = cms.bool(True),  # best match per reco object: by DeltaR
)

# Matches to firing trigger objects of the single-jet path 'HLT_PFJet40'.
# NOTE: jets use the DeltaR-only matcher and looser cuts than leptons.
somePatJetTriggerMatchHLTPFJet40 = cms.EDProducer(
    "PATTriggerMatcherDRLessByR",  # match by DeltaR only, best match by DeltaR
    src = cms.InputTag('selectedPatJets'),
    matched = cms.InputTag('patTrigger'),
    matchedCuts = cms.string('path( "HLT_PFJet40_v*" )'),
    maxDPtRel = cms.double(3.0),
    maxDeltaR = cms.double(0.4),
    resolveAmbiguities    = cms.bool(True),  # only one match per trigger object
    resolveByMatchQuality = cms.bool(True),  # best match per reco object: by DeltaR
)
# Matches to firing trigger objects of the MET path 'HLT_MET120'.
somePatMetTriggerMatchHLTMET120 = cms.EDProducer(
    "PATTriggerMatcherDRLessByR",  # match by DeltaR only, best match by DeltaR
    src = cms.InputTag('patMETs'),
    # Default producer label as defined in
    # PhysicsTools/PatAlgos/python/triggerLayer1/triggerProducer_cfi.py.
    matched = cms.InputTag('patTrigger'),
    matchedCuts = cms.string('path( "HLT_MET120_v*" )'),
    # Cut values are NOT tuned (old TQAF MC match values, January 2008).
    maxDPtRel = cms.double(3.0),
    maxDeltaR = cms.double(0.4),
    resolveAmbiguities    = cms.bool(True),  # only one match per trigger object
    resolveByMatchQuality = cms.bool(True),  # best match per reco object: by DeltaR
)

# Matches for the cross-trigger 'HLT_Mu8_DiJet30': the muon leg.
# The type() cut restricts the match to the muon trigger objects of the path.
somePatMuonTriggerMatchHLTMu8DiJet30 = cms.EDProducer(
    "PATTriggerMatcherDRDPtLessByR",  # match by DeltaR only, best match by DeltaR
    src = cms.InputTag("selectedPatMuons"),
    matched = cms.InputTag("patTrigger"),
    matchedCuts = cms.string('type( "TriggerMuon" ) && path( "HLT_Mu8_DiJet30_v*" )'),
    maxDPtRel = cms.double(0.5),
    maxDeltaR = cms.double(0.5),
    resolveAmbiguities    = cms.bool(True),  # only one match per trigger object
    resolveByMatchQuality = cms.bool(True),  # best match per reco object: by DeltaR
)

# Matches for the cross-trigger 'HLT_Mu8_DiJet30': the jet leg.
somePatJetTriggerMatchHLTMu8DiJet30 = cms.EDProducer(
    "PATTriggerMatcherDRDPtLessByR",  # match by DeltaR only, best match by DeltaR
    src = cms.InputTag("selectedPatJets"),
    matched = cms.InputTag("patTrigger"),
    matchedCuts = cms.string('type( "TriggerJet" ) && path( "HLT_Mu8_DiJet30_v*" )'),
    maxDPtRel = cms.double(3.0),
    maxDeltaR = cms.double(0.4),
    resolveAmbiguities    = cms.bool(True),  # only one match per trigger object
    resolveByMatchQuality = cms.bool(True),  # best match per reco object: by DeltaR
)
_exampleTriggerMatchers = [ 'somePatMuonTriggerMatchHLTMu17'
, 'somePatMuonTriggerMatchHLTDoubleMu5IsoMu5'
, 'somePatPhotonTriggerMatchHLTPhoton26Photon18'
, 'somePatElectronTriggerMatchHLTEle17CaloIdTCaloIsoVLTrkIdVLTrkIsoVL'
, 'somePatTauTriggerMatchHLTDoubleMediumIsoPFTau30Trk1eta2p1'
, 'somePatJetTriggerMatchHLTPFJet40'
, 'somePatMetTriggerMatchHLTMET120'
, 'somePatMuonTriggerMatchHLTMu8DiJet30'
, 'somePatJetTriggerMatchHLTMu8DiJet30'
]
## Further examples ##
# Matches to L1 e/gamma objects, selected by their original collection.
somePatElectronTriggerMatchL1EGammaCollection = cms.EDProducer(
    "PATTriggerMatcherDRLessByR",  # match by DeltaR only, best match by DeltaR
    src = cms.InputTag('selectedPatElectrons'),
    # Default producer label as defined in
    # PhysicsTools/PatAlgos/python/triggerLayer1/triggerProducer_cfi.py.
    matched = cms.InputTag('patTrigger'),
    matchedCuts = cms.string('coll( "l1extraParticles:NonIsolated" ) || coll( "l1extraParticles:Isolated" )'),
    maxDPtRel = cms.double(0.5),
    maxDeltaR = cms.double(0.5),
    resolveAmbiguities    = cms.bool(True),   # only one match per trigger object
    resolveByMatchQuality = cms.bool(False),  # take first match found per reco object
)

# Matches to L1 and HLT muon objects, selected by trigger object ID.
somePatMuonTriggerMatchTriggerMuon = cms.EDProducer(
    "PATTriggerMatcherDRDPtLessByR",  # match by DeltaR and DeltaPt, best match by DeltaR
    src = cms.InputTag('selectedPatMuons'),
    matched = cms.InputTag('patTrigger'),
    matchedCuts = cms.string('type( "TriggerL1Mu" ) || type( "TriggerMuon" )'),
    maxDPtRel = cms.double(0.5),
    maxDeltaR = cms.double(0.5),
    resolveAmbiguities    = cms.bool(True),   # only one match per trigger object
    resolveByMatchQuality = cms.bool(False),  # take first match found per reco object
)
# Matches to firing trigger objects of all succeeding HLT paths of the
# primary dataset /SingleMu.
# FIX: the original expression listed 'HLT_Mu24_CentralPFJet30_CentralPFJet25_v*'
# twice (copy-paste duplicate); the redundant term has been removed. The OR of
# path() selectors is unchanged otherwise, so the match behavior is identical.
somePatMuonTriggerMatchPDSingleMu = cms.EDProducer(
    "PATTriggerMatcherDRDPtLessByR",  # match by DeltaR and DeltaPt, best match by DeltaR
    src = cms.InputTag('selectedPatMuons'),
    # Default producer label as defined in
    # PhysicsTools/PatAlgos/python/triggerLayer1/triggerProducer_cfi.py.
    matched = cms.InputTag('patTrigger'),
    # One path() selector per /SingleMu path; implicit string concatenation keeps
    # the expression readable.
    matchedCuts = cms.string(
        'path( "HLT_RelIso1p0Mu5_v*" ) || path( "HLT_RelIso1p0Mu20_v*" ) || path( "HLT_Mu5_v*" )'
        ' || path( "HLT_Mu50_eta2p1_v*" ) || path( "HLT_Mu40_v*" ) || path( "HLT_Mu40_eta2p1_v*" )'
        ' || path( "HLT_Mu40_eta2p1_Track60_dEdx3p7_v*" ) || path( "HLT_Mu40_eta2p1_Track50_dEdx3p6_v*" )'
        ' || path( "HLT_Mu30_v*" ) || path( "HLT_Mu30_eta2p1_v*" ) || path( "HLT_Mu24_v*" )'
        ' || path( "HLT_Mu24_eta2p1_v*" ) || path( "HLT_Mu24_PFJet30_PFJet25_Deta3_CentralPFJet25_v*" )'
        ' || path( "HLT_Mu24_CentralPFJet30_CentralPFJet25_v*" )'
        ' || path( "HLT_Mu17_eta2p1_TriCentralPFNoPUJet45_35_25_v*" )'
        ' || path( "HLT_Mu17_eta2p1_CentralPFNoPUJet30_BTagIPIter_v*" ) || path( "HLT_Mu15_eta2p1_v*" )'
        ' || path( "HLT_Mu15_eta2p1_TriCentral_40_20_20_v*" )'
        ' || path( "HLT_Mu15_eta2p1_TriCentral_40_20_20_DiBTagIP3D1stTrack_v*" )'
        ' || path( "HLT_Mu15_eta2p1_TriCentral_40_20_20_BTagIP3D1stTrack_v*" )'
        ' || path( "HLT_Mu15_eta2p1_L1Mu10erJetC12WdEtaPhi1DiJetsC_v*" ) || path( "HLT_Mu12_v*" )'
        ' || path( "HLT_Mu12_eta2p1_L1Mu10erJetC12WdEtaPhi1DiJetsC_v*" )'
        ' || path( "HLT_Mu12_eta2p1_DiCentral_40_20_v*" )'
        ' || path( "HLT_Mu12_eta2p1_DiCentral_40_20_DiBTagIP3D1stTrack_v*" )'
        ' || path( "HLT_Mu12_eta2p1_DiCentral_20_v*" )'
        ' || path( "HLT_L2Mu70_2Cha_eta2p1_PFMET60_v*" ) || path( "HLT_L2Mu70_2Cha_eta2p1_PFMET55_v*" )'
        ' || path( "HLT_IsoMu40_eta2p1_v*" ) || path( "HLT_IsoMu34_eta2p1_v*" ) || path( "HLT_IsoMu30_v*" )'
        ' || path( "HLT_IsoMu30_eta2p1_v*" ) || path( "HLT_IsoMu24_v*" ) || path( "HLT_IsoMu24_eta2p1_v*" )'
        ' || path( "HLT_IsoMu24_PFJet30_PFJet25_Deta3_CentralPFJet25_v*" )'
        ' || path( "HLT_IsoMu24_CentralPFJet30_CentralPFJet25_v*" )'
        ' || path( "HLT_IsoMu24_CentralPFJet30_CentralPFJet25_PFMET20_v*" )'
        ' || path( "HLT_IsoMu20_eta2p1_v*" ) || path( "HLT_IsoMu20_eta2p1_CentralPFJet80_v*" )'
        ' || path( "HLT_IsoMu20_WCandPt80_v*" )'
        ' || path( "HLT_IsoMu17_eta2p1_TriCentralPFNoPUJet45_35_25_v*" )'
        ' || path( "HLT_IsoMu17_eta2p1_TriCentralPFNoPUJet30_v*" )'
        ' || path( "HLT_IsoMu17_eta2p1_DiCentralPFNoPUJet30_v*" )'
        ' || path( "HLT_IsoMu17_eta2p1_CentralPFNoPUJet30_v*" )'
        ' || path( "HLT_IsoMu17_eta2p1_CentralPFNoPUJet30_BTagIPIter_v*" )'
    ),
    # Cut values are NOT tuned (old TQAF MC match values, January 2008).
    maxDPtRel = cms.double(0.5),
    maxDeltaR = cms.double(0.5),
    resolveAmbiguities    = cms.bool(True),  # only one match per trigger object
    resolveByMatchQuality = cms.bool(True),  # best match per reco object: by DeltaR
)
# Matches all trigger objects used in HLT path 'HLT_Mu17' to the MET
# (a deliberately fake match for demonstration purposes).
somePatMetTriggerMatchHLTMu17 = cms.EDProducer(
    "PATTriggerMatcherDRLessByR",  # match by DeltaR only, best match by DeltaR
    src = cms.InputTag('patMETs'),
    # Default producer label as defined in
    # PhysicsTools/PatAlgos/python/triggerLayer1/triggerProducer_cfi.py.
    matched = cms.InputTag('patTrigger'),
    matchedCuts = cms.string('path( "HLT_Mu17_v*" )'),
    maxDPtRel = cms.double(0.5),
    maxDeltaR = cms.double(0.5),
    resolveAmbiguities    = cms.bool(True),  # only one match per trigger object
    resolveByMatchQuality = cms.bool(True),  # best match per reco object: by DeltaR
)
# Task collecting every example matcher module for convenient scheduling.
triggerMatcherExamplesTask = cms.Task(
    somePatMuonTriggerMatchHLTMu17,
    somePatMuonTriggerMatchHLTDoubleMu5IsoMu5,
    somePatPhotonTriggerMatchHLTPhoton26Photon18,
    somePatElectronTriggerMatchHLTEle17CaloIdTCaloIsoVLTrkIdVLTrkIsoVL,
    somePatTauTriggerMatchHLTDoubleMediumIsoPFTau30Trk1eta2p1,
    somePatJetTriggerMatchHLTPFJet40,
    somePatMetTriggerMatchHLTMET120,
    somePatMuonTriggerMatchHLTMu8DiJet30,
    somePatJetTriggerMatchHLTMu8DiJet30,
    somePatElectronTriggerMatchL1EGammaCollection,
    somePatMuonTriggerMatchTriggerMuon,
    somePatMuonTriggerMatchPDSingleMu,
    somePatMetTriggerMatchHLTMu17,
)
| 68.502513
| 2,144
| 0.723958
| 1,478
| 13,632
| 6.508796
| 0.140054
| 0.047297
| 0.037422
| 0.022869
| 0.818815
| 0.777547
| 0.752911
| 0.718919
| 0.70894
| 0.616216
| 0
| 0.043288
| 0.17811
| 13,632
| 198
| 2,145
| 68.848485
| 0.815334
| 0.314407
| 0
| 0.557692
| 0
| 0.00641
| 0.395941
| 0.248813
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00641
| 0
| 0.00641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7a9aa6095ee0985cf37765c882f902abec0f302c
| 24,592
|
py
|
Python
|
sdk/python/pulumi_snowflake/pipe.py
|
Hacker0x01/pulumi-snowflake
|
f6ebcf2c3f73b103a7c2001fae231998ce1323b2
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2021-07-01T17:03:33.000Z
|
2022-03-01T19:29:04.000Z
|
sdk/python/pulumi_snowflake/pipe.py
|
Hacker0x01/pulumi-snowflake
|
f6ebcf2c3f73b103a7c2001fae231998ce1323b2
|
[
"ECL-2.0",
"Apache-2.0"
] | 102
|
2021-07-14T13:12:58.000Z
|
2022-03-31T18:34:04.000Z
|
sdk/python/pulumi_snowflake/pipe.py
|
Hacker0x01/pulumi-snowflake
|
f6ebcf2c3f73b103a7c2001fae231998ce1323b2
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-03-25T07:24:45.000Z
|
2022-03-25T07:24:45.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['PipeArgs', 'Pipe']
@pulumi.input_type
class PipeArgs:
    """The set of arguments for constructing a Pipe resource."""

    def __init__(__self__, *,
                 copy_statement: pulumi.Input[str],
                 database: pulumi.Input[str],
                 schema: pulumi.Input[str],
                 auto_ingest: Optional[pulumi.Input[bool]] = None,
                 aws_sns_topic_arn: Optional[pulumi.Input[str]] = None,
                 comment: Optional[pulumi.Input[str]] = None,
                 error_integration: Optional[pulumi.Input[str]] = None,
                 integration: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Pipe resource.

        :param pulumi.Input[str] copy_statement: Specifies the copy statement for the pipe.
        :param pulumi.Input[str] database: The database in which to create the pipe.
        :param pulumi.Input[str] schema: The schema in which to create the pipe.
        :param pulumi.Input[bool] auto_ingest: Specifies the auto_ingest param for the pipe.
        :param pulumi.Input[str] aws_sns_topic_arn: Specifies the Amazon Resource Name (ARN) for the SNS topic for your S3 bucket.
        :param pulumi.Input[str] comment: Specifies a comment for the pipe.
        :param pulumi.Input[str] error_integration: Specifies the name of the notification integration used for error notifications.
        :param pulumi.Input[str] integration: Specifies an integration for the pipe.
        :param pulumi.Input[str] name: Specifies the identifier for the pipe; must be unique for the database and schema in which the pipe is created.
        """
        # Required arguments are always recorded.
        pulumi.set(__self__, "copy_statement", copy_statement)
        pulumi.set(__self__, "database", database)
        pulumi.set(__self__, "schema", schema)
        # Optional arguments are recorded only when explicitly supplied.
        for arg_name, arg_value in (("auto_ingest", auto_ingest),
                                    ("aws_sns_topic_arn", aws_sns_topic_arn),
                                    ("comment", comment),
                                    ("error_integration", error_integration),
                                    ("integration", integration),
                                    ("name", name)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="copyStatement")
    def copy_statement(self) -> pulumi.Input[str]:
        """Specifies the copy statement for the pipe."""
        return pulumi.get(self, "copy_statement")

    @copy_statement.setter
    def copy_statement(self, value: pulumi.Input[str]):
        pulumi.set(self, "copy_statement", value)

    @property
    @pulumi.getter
    def database(self) -> pulumi.Input[str]:
        """The database in which to create the pipe."""
        return pulumi.get(self, "database")

    @database.setter
    def database(self, value: pulumi.Input[str]):
        pulumi.set(self, "database", value)

    @property
    @pulumi.getter
    def schema(self) -> pulumi.Input[str]:
        """The schema in which to create the pipe."""
        return pulumi.get(self, "schema")

    @schema.setter
    def schema(self, value: pulumi.Input[str]):
        pulumi.set(self, "schema", value)

    @property
    @pulumi.getter(name="autoIngest")
    def auto_ingest(self) -> Optional[pulumi.Input[bool]]:
        """Specifies the auto_ingest param for the pipe."""
        return pulumi.get(self, "auto_ingest")

    @auto_ingest.setter
    def auto_ingest(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "auto_ingest", value)

    @property
    @pulumi.getter(name="awsSnsTopicArn")
    def aws_sns_topic_arn(self) -> Optional[pulumi.Input[str]]:
        """Specifies the Amazon Resource Name (ARN) for the SNS topic for your S3 bucket."""
        return pulumi.get(self, "aws_sns_topic_arn")

    @aws_sns_topic_arn.setter
    def aws_sns_topic_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "aws_sns_topic_arn", value)

    @property
    @pulumi.getter
    def comment(self) -> Optional[pulumi.Input[str]]:
        """Specifies a comment for the pipe."""
        return pulumi.get(self, "comment")

    @comment.setter
    def comment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "comment", value)

    @property
    @pulumi.getter(name="errorIntegration")
    def error_integration(self) -> Optional[pulumi.Input[str]]:
        """Specifies the name of the notification integration used for error notifications."""
        return pulumi.get(self, "error_integration")

    @error_integration.setter
    def error_integration(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "error_integration", value)

    @property
    @pulumi.getter
    def integration(self) -> Optional[pulumi.Input[str]]:
        """Specifies an integration for the pipe."""
        return pulumi.get(self, "integration")

    @integration.setter
    def integration(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "integration", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Specifies the identifier for the pipe; must be unique for the database and schema in which the pipe is created."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class _PipeState:
    """Input properties used for looking up and filtering Pipe resources."""

    def __init__(__self__, *,
                 auto_ingest: Optional[pulumi.Input[bool]] = None,
                 aws_sns_topic_arn: Optional[pulumi.Input[str]] = None,
                 comment: Optional[pulumi.Input[str]] = None,
                 copy_statement: Optional[pulumi.Input[str]] = None,
                 database: Optional[pulumi.Input[str]] = None,
                 error_integration: Optional[pulumi.Input[str]] = None,
                 integration: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 notification_channel: Optional[pulumi.Input[str]] = None,
                 owner: Optional[pulumi.Input[str]] = None,
                 schema: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Pipe resources.

        :param pulumi.Input[bool] auto_ingest: Specifies the auto_ingest param for the pipe.
        :param pulumi.Input[str] aws_sns_topic_arn: Specifies the Amazon Resource Name (ARN) for the SNS topic for your S3 bucket.
        :param pulumi.Input[str] comment: Specifies a comment for the pipe.
        :param pulumi.Input[str] copy_statement: Specifies the copy statement for the pipe.
        :param pulumi.Input[str] database: The database in which to create the pipe.
        :param pulumi.Input[str] error_integration: Specifies the name of the notification integration used for error notifications.
        :param pulumi.Input[str] integration: Specifies an integration for the pipe.
        :param pulumi.Input[str] name: Specifies the identifier for the pipe; must be unique for the database and schema in which the pipe is created.
        :param pulumi.Input[str] notification_channel: Amazon Resource Name of the Amazon SQS queue for the stage named in the DEFINITION column.
        :param pulumi.Input[str] owner: Name of the role that owns the pipe.
        :param pulumi.Input[str] schema: The schema in which to create the pipe.
        """
        # Every state field is optional; record only those explicitly supplied.
        for arg_name, arg_value in (("auto_ingest", auto_ingest),
                                    ("aws_sns_topic_arn", aws_sns_topic_arn),
                                    ("comment", comment),
                                    ("copy_statement", copy_statement),
                                    ("database", database),
                                    ("error_integration", error_integration),
                                    ("integration", integration),
                                    ("name", name),
                                    ("notification_channel", notification_channel),
                                    ("owner", owner),
                                    ("schema", schema)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="autoIngest")
    def auto_ingest(self) -> Optional[pulumi.Input[bool]]:
        """Specifies the auto_ingest param for the pipe."""
        return pulumi.get(self, "auto_ingest")

    @auto_ingest.setter
    def auto_ingest(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "auto_ingest", value)

    @property
    @pulumi.getter(name="awsSnsTopicArn")
    def aws_sns_topic_arn(self) -> Optional[pulumi.Input[str]]:
        """Specifies the Amazon Resource Name (ARN) for the SNS topic for your S3 bucket."""
        return pulumi.get(self, "aws_sns_topic_arn")

    @aws_sns_topic_arn.setter
    def aws_sns_topic_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "aws_sns_topic_arn", value)

    @property
    @pulumi.getter
    def comment(self) -> Optional[pulumi.Input[str]]:
        """Specifies a comment for the pipe."""
        return pulumi.get(self, "comment")

    @comment.setter
    def comment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "comment", value)

    @property
    @pulumi.getter(name="copyStatement")
    def copy_statement(self) -> Optional[pulumi.Input[str]]:
        """Specifies the copy statement for the pipe."""
        return pulumi.get(self, "copy_statement")

    @copy_statement.setter
    def copy_statement(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "copy_statement", value)

    @property
    @pulumi.getter
    def database(self) -> Optional[pulumi.Input[str]]:
        """The database in which to create the pipe."""
        return pulumi.get(self, "database")

    @database.setter
    def database(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "database", value)

    @property
    @pulumi.getter(name="errorIntegration")
    def error_integration(self) -> Optional[pulumi.Input[str]]:
        """Specifies the name of the notification integration used for error notifications."""
        return pulumi.get(self, "error_integration")

    @error_integration.setter
    def error_integration(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "error_integration", value)

    @property
    @pulumi.getter
    def integration(self) -> Optional[pulumi.Input[str]]:
        """Specifies an integration for the pipe."""
        return pulumi.get(self, "integration")

    @integration.setter
    def integration(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "integration", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Specifies the identifier for the pipe; must be unique for the database and schema in which the pipe is created."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="notificationChannel")
    def notification_channel(self) -> Optional[pulumi.Input[str]]:
        """Amazon Resource Name of the Amazon SQS queue for the stage named in the DEFINITION column."""
        return pulumi.get(self, "notification_channel")

    @notification_channel.setter
    def notification_channel(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "notification_channel", value)

    @property
    @pulumi.getter
    def owner(self) -> Optional[pulumi.Input[str]]:
        """Name of the role that owns the pipe."""
        return pulumi.get(self, "owner")

    @owner.setter
    def owner(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "owner", value)

    @property
    @pulumi.getter
    def schema(self) -> Optional[pulumi.Input[str]]:
        """The schema in which to create the pipe."""
        return pulumi.get(self, "schema")

    @schema.setter
    def schema(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "schema", value)
class Pipe(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_ingest: Optional[pulumi.Input[bool]] = None,
aws_sns_topic_arn: Optional[pulumi.Input[str]] = None,
comment: Optional[pulumi.Input[str]] = None,
copy_statement: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
error_integration: Optional[pulumi.Input[str]] = None,
integration: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
schema: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Import
# format is database name | schema name | pipe name
```sh
$ pulumi import snowflake:index/pipe:Pipe example 'dbName|schemaName|pipeName'
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_ingest: Specifies a auto_ingest param for the pipe.
:param pulumi.Input[str] aws_sns_topic_arn: Specifies the Amazon Resource Name (ARN) for the SNS topic for your S3 bucket.
:param pulumi.Input[str] comment: Specifies a comment for the pipe.
:param pulumi.Input[str] copy_statement: Specifies the copy statement for the pipe.
:param pulumi.Input[str] database: The database in which to create the pipe.
:param pulumi.Input[str] error_integration: Specifies the name of the notification integration used for error notifications.
:param pulumi.Input[str] integration: Specifies an integration for the pipe.
:param pulumi.Input[str] name: Specifies the identifier for the pipe; must be unique for the database and schema in which the pipe is created.
:param pulumi.Input[str] schema: The schema in which to create the pipe.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PipeArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
# format is database name | schema name | pipe name
```sh
$ pulumi import snowflake:index/pipe:Pipe example 'dbName|schemaName|pipeName'
```
:param str resource_name: The name of the resource.
:param PipeArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PipeArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_ingest: Optional[pulumi.Input[bool]] = None,
aws_sns_topic_arn: Optional[pulumi.Input[str]] = None,
comment: Optional[pulumi.Input[str]] = None,
copy_statement: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
error_integration: Optional[pulumi.Input[str]] = None,
integration: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
schema: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PipeArgs.__new__(PipeArgs)
__props__.__dict__["auto_ingest"] = auto_ingest
__props__.__dict__["aws_sns_topic_arn"] = aws_sns_topic_arn
__props__.__dict__["comment"] = comment
if copy_statement is None and not opts.urn:
raise TypeError("Missing required property 'copy_statement'")
__props__.__dict__["copy_statement"] = copy_statement
if database is None and not opts.urn:
raise TypeError("Missing required property 'database'")
__props__.__dict__["database"] = database
__props__.__dict__["error_integration"] = error_integration
__props__.__dict__["integration"] = integration
__props__.__dict__["name"] = name
if schema is None and not opts.urn:
raise TypeError("Missing required property 'schema'")
__props__.__dict__["schema"] = schema
__props__.__dict__["notification_channel"] = None
__props__.__dict__["owner"] = None
super(Pipe, __self__).__init__(
'snowflake:index/pipe:Pipe',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
auto_ingest: Optional[pulumi.Input[bool]] = None,
aws_sns_topic_arn: Optional[pulumi.Input[str]] = None,
comment: Optional[pulumi.Input[str]] = None,
copy_statement: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
error_integration: Optional[pulumi.Input[str]] = None,
integration: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_channel: Optional[pulumi.Input[str]] = None,
owner: Optional[pulumi.Input[str]] = None,
schema: Optional[pulumi.Input[str]] = None) -> 'Pipe':
"""
Get an existing Pipe resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_ingest: Specifies a auto_ingest param for the pipe.
:param pulumi.Input[str] aws_sns_topic_arn: Specifies the Amazon Resource Name (ARN) for the SNS topic for your S3 bucket.
:param pulumi.Input[str] comment: Specifies a comment for the pipe.
:param pulumi.Input[str] copy_statement: Specifies the copy statement for the pipe.
:param pulumi.Input[str] database: The database in which to create the pipe.
:param pulumi.Input[str] error_integration: Specifies the name of the notification integration used for error notifications.
:param pulumi.Input[str] integration: Specifies an integration for the pipe.
:param pulumi.Input[str] name: Specifies the identifier for the pipe; must be unique for the database and schema in which the pipe is created.
:param pulumi.Input[str] notification_channel: Amazon Resource Name of the Amazon SQS queue for the stage named in the DEFINITION column.
:param pulumi.Input[str] owner: Name of the role that owns the pipe.
:param pulumi.Input[str] schema: The schema in which to create the pipe.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _PipeState.__new__(_PipeState)
__props__.__dict__["auto_ingest"] = auto_ingest
__props__.__dict__["aws_sns_topic_arn"] = aws_sns_topic_arn
__props__.__dict__["comment"] = comment
__props__.__dict__["copy_statement"] = copy_statement
__props__.__dict__["database"] = database
__props__.__dict__["error_integration"] = error_integration
__props__.__dict__["integration"] = integration
__props__.__dict__["name"] = name
__props__.__dict__["notification_channel"] = notification_channel
__props__.__dict__["owner"] = owner
__props__.__dict__["schema"] = schema
return Pipe(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="autoIngest")
def auto_ingest(self) -> pulumi.Output[Optional[bool]]:
    """
    Specifies a auto_ingest param for the pipe.

    Maps to the provider property ``autoIngest``; the resolved value is
    fetched from the resource's output state via ``pulumi.get``.
    """
    return pulumi.get(self, "auto_ingest")
@property
@pulumi.getter(name="awsSnsTopicArn")
def aws_sns_topic_arn(self) -> pulumi.Output[Optional[str]]:
    """
    Specifies the Amazon Resource Name (ARN) for the SNS topic for your S3 bucket.

    Maps to the provider property ``awsSnsTopicArn``.
    """
    return pulumi.get(self, "aws_sns_topic_arn")
@property
@pulumi.getter
def comment(self) -> pulumi.Output[Optional[str]]:
    """
    Specifies a comment for the pipe.

    Resolved from the resource's output state via ``pulumi.get``.
    """
    return pulumi.get(self, "comment")
@property
@pulumi.getter(name="copyStatement")
def copy_statement(self) -> pulumi.Output[str]:
    """
    Specifies the copy statement for the pipe.

    Required output (non-Optional); maps to the provider property
    ``copyStatement``.
    """
    return pulumi.get(self, "copy_statement")
@property
@pulumi.getter
def database(self) -> pulumi.Output[str]:
    """
    The database in which to create the pipe.

    Required output; resolved via ``pulumi.get``.
    """
    return pulumi.get(self, "database")
@property
@pulumi.getter(name="errorIntegration")
def error_integration(self) -> pulumi.Output[Optional[str]]:
    """
    Specifies the name of the notification integration used for error notifications.

    Maps to the provider property ``errorIntegration``.
    """
    return pulumi.get(self, "error_integration")
@property
@pulumi.getter
def integration(self) -> pulumi.Output[Optional[str]]:
    """
    Specifies an integration for the pipe.

    Resolved from the resource's output state via ``pulumi.get``.
    """
    return pulumi.get(self, "integration")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    Specifies the identifier for the pipe; must be unique for the database and schema in which the pipe is created.

    Required output; resolved via ``pulumi.get``.
    """
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="notificationChannel")
def notification_channel(self) -> pulumi.Output[str]:
    """
    Amazon Resource Name of the Amazon SQS queue for the stage named in the DEFINITION column.

    Maps to the provider property ``notificationChannel``.
    """
    return pulumi.get(self, "notification_channel")
@property
@pulumi.getter
def owner(self) -> pulumi.Output[str]:
    """
    Name of the role that owns the pipe.

    Required output; resolved via ``pulumi.get``.
    """
    return pulumi.get(self, "owner")
@property
@pulumi.getter
def schema(self) -> pulumi.Output[str]:
    """
    The schema in which to create the pipe.

    Required output; resolved via ``pulumi.get``.
    """
    return pulumi.get(self, "schema")
| 41.331092
| 150
| 0.637565
| 2,934
| 24,592
| 5.13531
| 0.058623
| 0.0971
| 0.109644
| 0.10367
| 0.878675
| 0.854782
| 0.830026
| 0.802217
| 0.79558
| 0.767439
| 0
| 0.000439
| 0.259719
| 24,592
| 594
| 151
| 41.400673
| 0.82719
| 0.279644
| 0
| 0.738028
| 1
| 0
| 0.092044
| 0.001522
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16338
| false
| 0.002817
| 0.014085
| 0
| 0.276056
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8fb24d4688c7013dba9ee5ff51a0a8adde676d3d
| 101
|
py
|
Python
|
external/synth/tests/test__nudging.py
|
jacnugent/fv3net
|
84958651bdd17784fdab98f87ad0d65414c03368
|
[
"MIT"
] | 5
|
2021-03-20T22:42:40.000Z
|
2021-06-30T18:39:36.000Z
|
external/synth/tests/test__nudging.py
|
jacnugent/fv3net
|
84958651bdd17784fdab98f87ad0d65414c03368
|
[
"MIT"
] | 195
|
2021-09-16T05:47:18.000Z
|
2022-03-31T22:03:15.000Z
|
external/synth/tests/test__nudging.py
|
ai2cm/fv3net
|
e62038aee0a97d6207e66baabd8938467838cf51
|
[
"MIT"
] | 1
|
2021-06-16T22:04:24.000Z
|
2021-06-16T22:04:24.000Z
|
from synth import generate_nudging
def test_generate_nudging(tmpdir):
    # Smoke test: generate_nudging should complete without raising when
    # pointed at pytest's per-test temporary directory. No output contents
    # are asserted here — presumably generate_nudging writes files into
    # tmpdir; verify against its implementation if stronger checks are needed.
    generate_nudging(tmpdir)
| 16.833333
| 34
| 0.821782
| 13
| 101
| 6.076923
| 0.615385
| 0.56962
| 0.531646
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128713
| 101
| 5
| 35
| 20.2
| 0.897727
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
8fdbf4f9fb66c1fad6bd44e31e94f5e9bb50ae71
| 16,054
|
py
|
Python
|
api/controller/toolchain_unittest.py
|
khromiumos/chromiumos-chromite
|
a42a85481cdd9d635dc40a04585e427f89f3bb3f
|
[
"BSD-3-Clause"
] | null | null | null |
api/controller/toolchain_unittest.py
|
khromiumos/chromiumos-chromite
|
a42a85481cdd9d635dc40a04585e427f89f3bb3f
|
[
"BSD-3-Clause"
] | 2
|
2021-03-26T00:29:32.000Z
|
2021-04-30T21:29:33.000Z
|
api/controller/toolchain_unittest.py
|
khromiumos/chromiumos-chromite
|
a42a85481cdd9d635dc40a04585e427f89f3bb3f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for Toolchain-related operations."""
from __future__ import print_function
from chromite.api import api_config
from chromite.api import controller
from chromite.api.controller import toolchain
from chromite.api.gen.chromite.api import artifacts_pb2
from chromite.api.gen.chromite.api import sysroot_pb2
from chromite.api.gen.chromite.api import toolchain_pb2
from chromite.api.gen.chromiumos.builder_config_pb2 import BuilderConfig
from chromite.api.gen.chromiumos import common_pb2
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import toolchain_util
# pylint: disable=protected-access
class UpdateEbuildWithAFDOArtifactsTest(cros_test_lib.MockTestCase,
                                        api_config.ApiConfigMixin):
  """Unittests for UpdateEbuildWithAFDOArtifacts."""

  @staticmethod
  def mock_die(message, *args):
    # Stand-in for cros_build_lib.Die: raise instead of exiting the process
    # so tests can catch the failure with assertRaises.
    raise cros_build_lib.DieSystemExit(message % args)

  def setUp(self):
    self.board = 'board'
    self.response = toolchain_pb2.VerifyAFDOArtifactsResponse()
    # BENCHMARK_AFDO is used as the unsupported artifact type in the
    # error-path test below.
    self.invalid_artifact_type = toolchain_pb2.BENCHMARK_AFDO
    # Patch each toolchain_util updater so tests can assert which one the
    # controller dispatches to for a given artifact type.
    self.orderfile_command = self.PatchObject(
        toolchain_util, 'OrderfileUpdateChromeEbuild', return_value=True)
    self.kernel_command = self.PatchObject(
        toolchain_util, 'AFDOUpdateKernelEbuild', return_value=True)
    self.chrome_command = self.PatchObject(
        toolchain_util, 'AFDOUpdateChromeEbuild', return_value=True)
    self.PatchObject(cros_build_lib, 'Die', new=self.mock_die)

  def _GetRequest(self, build_target=None, artifact_type=None):
    # Helper: build a VerifyAFDOArtifactsRequest for the given board and
    # artifact type.
    return toolchain_pb2.VerifyAFDOArtifactsRequest(
        build_target={'name': build_target},
        artifact_type=artifact_type,
    )

  def testValidateOnly(self):
    """Sanity check that a validate only call does not execute any logic."""
    patch = self.PatchObject(toolchain_util, 'OrderfileUpdateChromeEbuild')
    request = self._GetRequest(
        build_target=self.board, artifact_type=toolchain_pb2.ORDERFILE)
    toolchain.UpdateEbuildWithAFDOArtifacts(request, self.response,
                                            self.validate_only_config)
    patch.assert_not_called()

  def testMockCall(self):
    """Test that a mock call does not execute logic, returns mock value."""
    patch = self.PatchObject(toolchain_util, 'OrderfileUpdateChromeEbuild')
    request = self._GetRequest(
        build_target=self.board, artifact_type=toolchain_pb2.ORDERFILE)
    toolchain.UpdateEbuildWithAFDOArtifacts(request, self.response,
                                            self.mock_call_config)
    patch.assert_not_called()
    # Mock calls are expected to populate the response with a stub value.
    self.assertEqual(self.response.status, True)

  def testWrongArtifactType(self):
    """Test passing wrong artifact type."""
    request = self._GetRequest(
        build_target=self.board, artifact_type=self.invalid_artifact_type)
    with self.assertRaises(cros_build_lib.DieSystemExit) as context:
      toolchain.UpdateEbuildWithAFDOArtifacts(request, self.response,
                                              self.api_config)
    self.assertIn('artifact_type (%d) must be in' % self.invalid_artifact_type,
                  str(context.exception))

  def testOrderfileSuccess(self):
    """Test the command is called correctly with orderfile."""
    request = self._GetRequest(
        build_target=self.board, artifact_type=toolchain_pb2.ORDERFILE)
    toolchain.UpdateEbuildWithAFDOArtifacts(request, self.response,
                                            self.api_config)
    # Only the orderfile updater runs; the other two must stay untouched.
    self.orderfile_command.assert_called_once_with(self.board)
    self.kernel_command.assert_not_called()
    self.chrome_command.assert_not_called()

  def testKernelAFDOSuccess(self):
    """Test the command is called correctly with kernel afdo."""
    request = self._GetRequest(
        build_target=self.board, artifact_type=toolchain_pb2.KERNEL_AFDO)
    toolchain.UpdateEbuildWithAFDOArtifacts(request, self.response,
                                            self.api_config)
    self.kernel_command.assert_called_once_with(self.board)
    self.orderfile_command.assert_not_called()
    self.chrome_command.assert_not_called()

  def testChromeAFDOSuccess(self):
    """Test the command is called correctly with Chrome afdo."""
    request = self._GetRequest(
        build_target=self.board, artifact_type=toolchain_pb2.CHROME_AFDO)
    toolchain.UpdateEbuildWithAFDOArtifacts(request, self.response,
                                            self.api_config)
    self.chrome_command.assert_called_once_with(self.board)
    self.orderfile_command.assert_not_called()
    self.kernel_command.assert_not_called()
class UploadVettedFDOArtifactsTest(UpdateEbuildWithAFDOArtifactsTest):
  """Unittests for UploadVettedAFDOArtifacts.

  Inherits mock_die and _GetRequest from UpdateEbuildWithAFDOArtifactsTest;
  the byte-identical mock_die redefinition that used to live here was
  removed as redundant.

  NOTE(review): the class name is missing an 'A' (FDO vs AFDO); renaming is
  avoided so any external references keep working.
  """

  def setUp(self):
    self.board = 'board'
    self.response = toolchain_pb2.VerifyAFDOArtifactsResponse()
    # BENCHMARK_AFDO is used as the unsupported artifact type below.
    self.invalid_artifact_type = toolchain_pb2.BENCHMARK_AFDO
    # All supported artifact types dispatch to the same upload helper; which
    # artifact is uploaded is asserted via the first positional argument.
    self.command = self.PatchObject(
        toolchain_util,
        'UploadAndPublishVettedAFDOArtifacts',
        return_value=True)
    self.PatchObject(cros_build_lib, 'Die', new=self.mock_die)

  def testValidateOnly(self):
    """Sanity check that a validate only call does not execute any logic."""
    request = self._GetRequest(
        build_target=self.board, artifact_type=toolchain_pb2.ORDERFILE)
    toolchain.UploadVettedAFDOArtifacts(request, self.response,
                                        self.validate_only_config)
    self.command.assert_not_called()

  def testMockCall(self):
    """Test that a mock call does not execute logic, returns mock value."""
    request = self._GetRequest(
        build_target=self.board, artifact_type=toolchain_pb2.ORDERFILE)
    toolchain.UploadVettedAFDOArtifacts(request, self.response,
                                        self.mock_call_config)
    self.command.assert_not_called()
    self.assertEqual(self.response.status, True)

  def testWrongArtifactType(self):
    """Test passing wrong artifact type."""
    request = self._GetRequest(
        build_target=self.board, artifact_type=self.invalid_artifact_type)
    with self.assertRaises(cros_build_lib.DieSystemExit) as context:
      toolchain.UploadVettedAFDOArtifacts(request, self.response,
                                          self.api_config)
    self.assertIn('artifact_type (%d) must be in' % self.invalid_artifact_type,
                  str(context.exception))

  def testOrderfileSuccess(self):
    """Test the command is called correctly with orderfile."""
    request = self._GetRequest(
        build_target=self.board, artifact_type=toolchain_pb2.ORDERFILE)
    toolchain.UploadVettedAFDOArtifacts(request, self.response, self.api_config)
    self.command.assert_called_once_with('orderfile', self.board)

  def testKernelAFDOSuccess(self):
    """Test the command is called correctly with kernel afdo."""
    request = self._GetRequest(
        build_target=self.board, artifact_type=toolchain_pb2.KERNEL_AFDO)
    toolchain.UploadVettedAFDOArtifacts(request, self.response, self.api_config)
    self.command.assert_called_once_with('kernel_afdo', self.board)

  def testChromeAFDOSuccess(self):
    """Test the command is called correctly with Chrome afdo."""
    request = self._GetRequest(
        build_target=self.board, artifact_type=toolchain_pb2.CHROME_AFDO)
    toolchain.UploadVettedAFDOArtifacts(request, self.response, self.api_config)
    self.command.assert_called_once_with('chrome_afdo', self.board)
class PrepareForBuildTest(cros_test_lib.MockTempDirTestCase,
                          api_config.ApiConfigMixin):
  """Unittests for PrepareForBuild."""

  def setUp(self):
    self.response = toolchain_pb2.PrepareForToolchainBuildResponse()
    # Patch the two toolchain_util entry points so the tests can assert how
    # the controller translates the proto request into their arguments.
    self.prep = self.PatchObject(
        toolchain_util, 'PrepareForBuild',
        return_value=toolchain_util.PrepareForBuildReturn.NEEDED)
    self.bundle = self.PatchObject(
        toolchain_util, 'BundleArtifacts', return_value=[])
    # Register exactly one artifact handler so any other artifact type is
    # "unknown" to the controller under test.
    self.PatchObject(toolchain, '_TOOLCHAIN_ARTIFACT_HANDLERS', {
        BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE:
            toolchain._Handlers('UnverifiedChromeLlvmOrderfile',
                                self.prep, self.bundle),
    })

  def _GetRequest(
      self, artifact_types=None, input_artifacts=None, additional_args=None):
    # Helper: build a request with a chroot rooted at the test tempdir and a
    # fixed 'board' sysroot.
    chroot = common_pb2.Chroot(path=self.tempdir)
    sysroot = sysroot_pb2.Sysroot(
        path='/build/board', build_target=common_pb2.BuildTarget(name='board'))
    return toolchain_pb2.PrepareForToolchainBuildRequest(
        artifact_types=artifact_types, chroot=chroot, sysroot=sysroot,
        input_artifacts=input_artifacts, additional_args=additional_args)

  def testRaisesForUnknown(self):
    # IMAGE_ARCHIVES is not in the patched handler table above.
    request = self._GetRequest([BuilderConfig.Artifacts.IMAGE_ARCHIVES])
    self.assertRaises(
        KeyError,
        toolchain.PrepareForBuild, request, self.response, self.api_config)

  def testAcceptsNone(self):
    # Missing chroot/sysroot should degrade to empty-string paths, not fail.
    request = toolchain_pb2.PrepareForToolchainBuildRequest(
        artifact_types=[
            BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE],
        chroot=None, sysroot=None)
    toolchain.PrepareForBuild(request, self.response, self.api_config)
    self.prep.assert_called_once_with(
        'UnverifiedChromeLlvmOrderfile', None, '', '', {}, {})

  def testHandlesUnknownInputArtifacts(self):
    # Input artifacts of an unhandled type (IMAGE_ZIP) are silently ignored:
    # the input-artifact dict passed to PrepareForBuild stays empty.
    request = toolchain_pb2.PrepareForToolchainBuildRequest(
        artifact_types=[
            BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE],
        chroot=None, sysroot=None, input_artifacts=[
            BuilderConfig.Artifacts.InputArtifactInfo(
                input_artifact_type=BuilderConfig.Artifacts.IMAGE_ZIP,
                input_artifact_gs_locations=['path1']),
        ])
    toolchain.PrepareForBuild(request, self.response, self.api_config)
    self.prep.assert_called_once_with(
        'UnverifiedChromeLlvmOrderfile', None, '', '', {}, {})

  def testPassesProfileInfo(self):
    # profile_info's chrome_cwp_profile is forwarded as a plain dict entry,
    # and gs locations get a 'gs://' prefix and are merged across entries.
    request = toolchain_pb2.PrepareForToolchainBuildRequest(
        artifact_types=[
            BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE],
        chroot=None, sysroot=None, input_artifacts=[
            BuilderConfig.Artifacts.InputArtifactInfo(
                input_artifact_type=BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE,
                input_artifact_gs_locations=['path1', 'path2']),
            BuilderConfig.Artifacts.InputArtifactInfo(
                input_artifact_type=BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE,
                input_artifact_gs_locations=['path3']),
        ],
        profile_info=common_pb2.ArtifactProfileInfo(
            chrome_cwp_profile='CWPVERSION'),
    )
    toolchain.PrepareForBuild(request, self.response, self.api_config)
    self.prep.assert_called_once_with(
        'UnverifiedChromeLlvmOrderfile', None, '', '', {
            'UnverifiedChromeLlvmOrderfile': [
                'gs://path1', 'gs://path2', 'gs://path3'],
        },
        {'chrome_cwp_profile': 'CWPVERSION'})

  def testPassesProfileInfoAfdoRelease(self):
    # A nested AfdoRelease message is flattened into top-level dict keys.
    request = toolchain_pb2.PrepareForToolchainBuildRequest(
        artifact_types=[
            BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE],
        chroot=None, sysroot=None, input_artifacts=[
            BuilderConfig.Artifacts.InputArtifactInfo(
                input_artifact_type=BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE,
                input_artifact_gs_locations=['path1', 'path2']),
            BuilderConfig.Artifacts.InputArtifactInfo(
                input_artifact_type=BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE,
                input_artifact_gs_locations=['path3']),
        ],
        profile_info=common_pb2.ArtifactProfileInfo(
            afdo_release=common_pb2.AfdoRelease(
                chrome_cwp_profile='CWPVERSION',
                image_build_id=1234)),
    )
    toolchain.PrepareForBuild(request, self.response, self.api_config)
    self.prep.assert_called_once_with(
        'UnverifiedChromeLlvmOrderfile', None, '', '', {
            'UnverifiedChromeLlvmOrderfile': [
                'gs://path1', 'gs://path2', 'gs://path3'],
        },
        {'chrome_cwp_profile': 'CWPVERSION', 'image_build_id': 1234})

  def testHandlesDuplicateInputArtifacts(self):
    # Two entries for the same artifact type merge into one location list.
    request = toolchain_pb2.PrepareForToolchainBuildRequest(
        artifact_types=[
            BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE],
        chroot=None, sysroot=None, input_artifacts=[
            BuilderConfig.Artifacts.InputArtifactInfo(
                input_artifact_type=BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE,
                input_artifact_gs_locations=['path1', 'path2']),
            BuilderConfig.Artifacts.InputArtifactInfo(
                input_artifact_type=BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE,
                input_artifact_gs_locations=['path3']),
        ])
    toolchain.PrepareForBuild(request, self.response, self.api_config)
    self.prep.assert_called_once_with(
        'UnverifiedChromeLlvmOrderfile', None, '', '', {
            'UnverifiedChromeLlvmOrderfile': [
                'gs://path1', 'gs://path2', 'gs://path3'],
        }, {})
class BundleToolchainTest(cros_test_lib.MockTempDirTestCase,
                          api_config.ApiConfigMixin):
  """Unittests for BundleToolchain."""

  def setUp(self):
    self.response = toolchain_pb2.BundleToolchainResponse()
    # Patch the toolchain_util entry points and register a single handler,
    # mirroring PrepareForBuildTest's setup.
    self.prep = self.PatchObject(
        toolchain_util, 'PrepareForBuild',
        return_value=toolchain_util.PrepareForBuildReturn.NEEDED)
    self.bundle = self.PatchObject(
        toolchain_util, 'BundleArtifacts', return_value=[])
    self.PatchObject(toolchain, '_TOOLCHAIN_ARTIFACT_HANDLERS', {
        BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE:
            toolchain._Handlers('UnverifiedChromeLlvmOrderfile',
                                self.prep, self.bundle),
    })

  def _GetRequest(self, artifact_types=None):
    # Helper: bundle request writing into the test tempdir.
    chroot = common_pb2.Chroot(path=self.tempdir)
    sysroot = sysroot_pb2.Sysroot(
        path='/build/board', build_target=common_pb2.BuildTarget(name='board'))
    return toolchain_pb2.BundleToolchainRequest(
        chroot=chroot, sysroot=sysroot,
        output_dir=self.tempdir,
        artifact_types=artifact_types,
    )

  def testRaisesForUnknown(self):
    # Unknown artifact types return an error code rather than raising.
    request = self._GetRequest([BuilderConfig.Artifacts.IMAGE_ARCHIVES])
    self.assertEqual(
        controller.RETURN_CODE_UNRECOVERABLE,
        toolchain.BundleArtifacts(request, self.response, self.api_config))

  def testValidateOnly(self):
    """Sanity check that a validate only call does not execute any logic."""
    request = self._GetRequest(
        [BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE])
    toolchain.BundleArtifacts(request, self.response,
                              self.validate_only_config)
    self.bundle.assert_not_called()

  def testSetsArtifactsInfo(self):
    # The bundler's returned file list should be echoed back in the
    # response as ArtifactInfo entries.
    request = self._GetRequest(
        [BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE])
    self.bundle.return_value = ['artifact.xz']
    toolchain.BundleArtifacts(request, self.response, self.api_config)
    self.assertEqual(1, len(self.response.artifacts_info))
    self.assertEqual(
        self.response.artifacts_info[0],
        toolchain_pb2.ArtifactInfo(
            artifact_type=(
                BuilderConfig.Artifacts.UNVERIFIED_CHROME_LLVM_ORDERFILE),
            artifacts=[
                artifacts_pb2.Artifact(path=self.bundle.return_value[0])]))
| 44.594444
| 80
| 0.704871
| 1,599
| 16,054
| 6.826141
| 0.124453
| 0.037288
| 0.036555
| 0.044251
| 0.831791
| 0.79322
| 0.786716
| 0.771232
| 0.727989
| 0.698672
| 0
| 0.005871
| 0.20431
| 16,054
| 359
| 81
| 44.718663
| 0.848599
| 0.070574
| 0
| 0.706485
| 0
| 0
| 0.064077
| 0.034129
| 0
| 0
| 0
| 0
| 0.109215
| 1
| 0.102389
| false
| 0.006826
| 0.040956
| 0.003413
| 0.167235
| 0.003413
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8fe1ec688bc27e9ee174fbb1920bc715a998d9e2
| 25
|
py
|
Python
|
src/auto_upload_demo_111/__init__.py
|
pganssle/auto_upload_demo_111
|
ffae8901f47614603097bf0aaee312b937ff67d8
|
[
"Apache-2.0"
] | null | null | null |
src/auto_upload_demo_111/__init__.py
|
pganssle/auto_upload_demo_111
|
ffae8901f47614603097bf0aaee312b937ff67d8
|
[
"Apache-2.0"
] | null | null | null |
src/auto_upload_demo_111/__init__.py
|
pganssle/auto_upload_demo_111
|
ffae8901f47614603097bf0aaee312b937ff67d8
|
[
"Apache-2.0"
] | null | null | null |
def f():
    """Return the fixed demo string "⸘‽"."""
    result = "⸘‽"
    return result
| 8.333333
| 15
| 0.4
| 4
| 25
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.32
| 25
| 2
| 16
| 12.5
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
8ffc85b1c763e1da7a637941d3b6cbd94ceb6985
| 157
|
py
|
Python
|
pycozmo/audiokinetic/__init__.py
|
nalbion/pycozmo
|
35ee1ea741ecf7a39affc38d4ff5ad17865fea16
|
[
"MIT"
] | 123
|
2019-08-25T21:28:23.000Z
|
2022-03-12T13:54:59.000Z
|
pycozmo/audiokinetic/__init__.py
|
nalbion/pycozmo
|
35ee1ea741ecf7a39affc38d4ff5ad17865fea16
|
[
"MIT"
] | 41
|
2019-08-25T21:21:37.000Z
|
2022-02-09T14:20:54.000Z
|
pycozmo/audiokinetic/__init__.py
|
nalbion/pycozmo
|
35ee1ea741ecf7a39affc38d4ff5ad17865fea16
|
[
"MIT"
] | 51
|
2019-09-04T13:30:02.000Z
|
2022-01-09T01:20:24.000Z
|
from . import exception # noqa
from . import soundbank # noqa
from . import soundbanksinfo # noqa
from . import wem # noqa
| 26.166667
| 38
| 0.579618
| 16
| 157
| 5.6875
| 0.4375
| 0.43956
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.369427
| 157
| 5
| 39
| 31.4
| 0.919192
| 0.121019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8f24d5c899fb2de983bc6dca2b8e8ae64268780c
| 285
|
py
|
Python
|
zentral/contrib/osquery/views/__init__.py
|
gwhitehawk/zentral
|
156134aed3d7ff8a7cb40ab6f2269a763c316459
|
[
"Apache-2.0"
] | 1
|
2019-06-10T06:11:27.000Z
|
2019-06-10T06:11:27.000Z
|
zentral/contrib/osquery/views/__init__.py
|
gwhitehawk/zentral
|
156134aed3d7ff8a7cb40ab6f2269a763c316459
|
[
"Apache-2.0"
] | null | null | null |
zentral/contrib/osquery/views/__init__.py
|
gwhitehawk/zentral
|
156134aed3d7ff8a7cb40ab6f2269a763c316459
|
[
"Apache-2.0"
] | 1
|
2020-09-09T19:26:04.000Z
|
2020-09-09T19:26:04.000Z
|
from .api import * # NOQA
from .osquery_compliance_probe import * # NOQA
from .osquery_distributed_query_probe import * # NOQA
from .osquery_file_carve_probe import * # NOQA
from .osquery_fim_probe import * # NOQA
from .osquery_probe import * # NOQA
from .setup import * # NOQA
| 35.625
| 54
| 0.754386
| 39
| 285
| 5.230769
| 0.333333
| 0.343137
| 0.411765
| 0.514706
| 0.509804
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17193
| 285
| 7
| 55
| 40.714286
| 0.864407
| 0.119298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
56e2d95cd755eca2597617bb6dcb51e0b0cee69a
| 4,955
|
py
|
Python
|
pymtl3/stdlib/queues/test/valrdy_queues_test.py
|
kevinyuan/pymtl3
|
5949e6a4acc625c0ccbbb25be3af1d0db683df3c
|
[
"BSD-3-Clause"
] | null | null | null |
pymtl3/stdlib/queues/test/valrdy_queues_test.py
|
kevinyuan/pymtl3
|
5949e6a4acc625c0ccbbb25be3af1d0db683df3c
|
[
"BSD-3-Clause"
] | null | null | null |
pymtl3/stdlib/queues/test/valrdy_queues_test.py
|
kevinyuan/pymtl3
|
5949e6a4acc625c0ccbbb25be3af1d0db683df3c
|
[
"BSD-3-Clause"
] | null | null | null |
from pymtl3 import *
from pymtl3.stdlib.ifcs import InValRdyIfc, OutValRdyIfc
from pymtl3.stdlib.test_utils import TestVectorSimulator
from ..valrdy_queues import *
def run_test_queue( model, test_vectors ):
  """Simulate ``model`` against ``test_vectors`` with TestVectorSimulator.

  Each vector row is [enq.val, enq.rdy, enq.msg, deq.val, deq.rdy, deq.msg].
  Columns 0, 2 and 4 drive the DUT inputs (enq.val, enq.msg, deq.rdy);
  columns 1, 3 and 5 are checked against the DUT outputs (enq.rdy, deq.val,
  deq.msg), with '?' meaning don't-care.
  """
  # Define functions mapping the test vector to ports in model
  def tv_in( model, tv ):
    model.enq.val @= tv[0]
    model.enq.msg @= tv[2]
    model.deq.rdy @= tv[4]

  def tv_out( model, tv ):
    if tv[1] != '?': assert model.enq.rdy == tv[1]
    if tv[3] != '?': assert model.deq.val == tv[3]
    if tv[5] != '?': assert model.deq.msg == tv[5]

  # Run the test
  sim = TestVectorSimulator( model, test_vectors, tv_in, tv_out )
  sim.run_test()
def test_bypass_Bits():
  """BypassQueue1RTL: an enqueued message is visible at deq in the same
  cycle (see row 1: enq 123 and deq.msg 123 together)."""
  B1 = mk_bits(1)
  B32 = mk_bits(32)
  run_test_queue( BypassQueue1RTL( Bits32 ), [
      # enq.val enq.rdy enq.msg  deq.val deq.rdy deq.msg
      [ B1(1) , B1(1) ,B32(123), B1(1) , B1(1) ,B32(123) ],
      [ B1(1) , B1(1) ,B32(345), B1(1) , B1(0) ,B32(345) ],
      [ B1(1) , B1(0) ,B32(567), B1(1) , B1(0) ,B32(345) ],
      [ B1(1) , B1(0) ,B32(567), B1(1) , B1(1) ,B32(345) ],
      [ B1(1) , B1(1) ,B32(567), B1(1) , B1(1) ,B32(567) ],
      [ B1(0) , B1(1) ,B32(0 ), B1(0) , B1(1) ,    '?'   ],
      [ B1(0) , B1(1) ,B32(0 ), B1(0) , B1(0) ,    '?'   ],
  ] )
def test_pipe_Bits():
  """PipeQueue1RTL: a message appears at deq one cycle after enqueue
  (row 1 enqueues 123; row 2 shows it at deq.msg)."""
  B1 = mk_bits(1)
  B32 = mk_bits(32)
  run_test_queue( PipeQueue1RTL( Bits32 ), [
      # enq.val enq.rdy enq.msg  deq.val deq.rdy deq.msg
      [ B1(1) , B1(1) ,B32(123), B1(0) , B1(1) ,    '?'   ],
      [ B1(1) , B1(0) ,B32(345), B1(1) , B1(0) ,B32(123) ],
      [ B1(1) , B1(0) ,B32(567), B1(1) , B1(0) ,B32(123) ],
      [ B1(1) , B1(1) ,B32(567), B1(1) , B1(1) ,B32(123) ],
      [ B1(1) , B1(1) ,B32(789), B1(1) , B1(1) ,B32(567) ],
      [ B1(0) , B1(1) ,B32(0 ), B1(1) , B1(1) ,B32(789) ],
      [ B1(0) , B1(1) ,B32(0 ), B1(0) , B1(0) ,    '?'   ],
  ] )
def test_normal_Bits():
  """NormalQueue1RTL: single-entry queue; while full (rows 3-5) enq.rdy
  stays low until the entry has been dequeued."""
  B1 = mk_bits(1)
  B32 = mk_bits(32)
  run_test_queue( NormalQueue1RTL( Bits32 ), [
      # enq.val enq.rdy enq.msg  deq.val deq.rdy deq.msg
      [ B1(1) , B1(1) ,B32(123), B1(0) , B1(1) ,    '?'   ],
      [ B1(1) , B1(0) ,B32(345), B1(1) , B1(0) ,B32(123) ],
      [ B1(1) , B1(0) ,B32(567), B1(1) , B1(0) ,B32(123) ],
      [ B1(1) , B1(0) ,B32(567), B1(1) , B1(1) ,B32(123) ],
      [ B1(1) , B1(1) ,B32(567), B1(0) , B1(1) ,B32(123) ],
      [ B1(0) , B1(0) ,B32(0 ), B1(1) , B1(1) ,B32(567) ],
      [ B1(0) , B1(1) ,B32(0 ), B1(0) , B1(0) ,    '?'   ],
  ] )
def test_2entry_normal_Bits():
  """Two Element Normal Queue.

  First enqueues and dequeues a single element, then fills the queue and
  exercises simultaneous enq/deq while full.
  """
  B1 = mk_bits(1)
  B32 = mk_bits(32)
  run_test_queue( NormalQueueRTL( 2, Bits32 ), [
      # Enqueue one element and then dequeue it
      # enq_val enq_rdy enq_bits     deq_val deq_rdy deq_bits
      [ B1(1),  B1(1),  B32(0x0001), B1(0),  B1(1),  '?'         ],
      [ B1(0),  B1(1),  B32(0x0000), B1(1),  B1(1),  B32(0x0001) ],
      [ B1(0),  B1(1),  B32(0x0000), B1(0),  B1(0),  '?'         ],
      # Fill in the queue and enq/deq at the same time
      # enq_val enq_rdy enq_bits     deq_val deq_rdy deq_bits
      [ B1(1),  B1(1),  B32(0x0002), B1(0),  B1(0),  '?'         ],
      [ B1(1),  B1(1),  B32(0x0003), B1(1),  B1(0),  B32(0x0002) ],
      [ B1(0),  B1(0),  B32(0x0003), B1(1),  B1(0),  B32(0x0002) ],
      [ B1(1),  B1(0),  B32(0x0003), B1(1),  B1(0),  B32(0x0002) ],
      [ B1(1),  B1(0),  B32(0x0003), B1(1),  B1(1),  B32(0x0002) ],
      [ B1(1),  B1(1),  B32(0x0004), B1(1),  B1(0),  '?'         ],
      [ B1(1),  B1(0),  B32(0x0004), B1(1),  B1(1),  B32(0x0003) ],
      [ B1(1),  B1(1),  B32(0x0005), B1(1),  B1(0),  '?'         ],
      [ B1(0),  B1(0),  B32(0x0005), B1(1),  B1(1),  B32(0x0004) ],
      [ B1(0),  B1(1),  B32(0x0005), B1(1),  B1(1),  B32(0x0005) ],
      [ B1(0),  B1(1),  B32(0x0005), B1(0),  B1(1),  '?'         ],
  ])
def test_3entry_normal_Bits():
  """Three Element Queue.

  Same shape as the two-entry test: a single enq/deq round trip, then
  fill-to-capacity with simultaneous enq/deq and a final drain.
  """
  B1 = mk_bits(1)
  B32 = mk_bits(32)
  run_test_queue( NormalQueueRTL( 3, Bits32 ), [
      # Enqueue one element and then dequeue it
      # enq_val enq_rdy enq_bits     deq_val deq_rdy deq_bits
      [ B1(1),  B1(1),  B32(0x0001), B1(0),  B1(1),  '?'         ],
      [ B1(0),  B1(1),  B32(0x0000), B1(1),  B1(1),  B32(0x0001) ],
      [ B1(0),  B1(1),  B32(0x0000), B1(0),  B1(0),  '?'         ],
      # Fill in the queue and enq/deq at the same time
      # enq_val enq_rdy enq_bits     deq_val deq_rdy deq_bits
      [ B1(1),  B1(1),  B32(0x0002), B1(0),  B1(0),  '?'         ],
      [ B1(1),  B1(1),  B32(0x0003), B1(1),  B1(0),  B32(0x0002) ],
      [ B1(1),  B1(1),  B32(0x0004), B1(1),  B1(0),  B32(0x0002) ],
      [ B1(1),  B1(0),  B32(0x0005), B1(1),  B1(0),  B32(0x0002) ],
      [ B1(0),  B1(0),  B32(0x0005), B1(1),  B1(0),  B32(0x0002) ],
      [ B1(1),  B1(0),  B32(0x0005), B1(1),  B1(1),  B32(0x0002) ],
      [ B1(1),  B1(1),  B32(0x0005), B1(1),  B1(1),  B32(0x0003) ],
      [ B1(1),  B1(1),  B32(0x0006), B1(1),  B1(1),  B32(0x0004) ],
      [ B1(1),  B1(1),  B32(0x0007), B1(1),  B1(1),  B32(0x0005) ],
      [ B1(0),  B1(1),  B32(0x0000), B1(1),  B1(1),  B32(0x0006) ],
      [ B1(0),  B1(1),  B32(0x0000), B1(1),  B1(1),  B32(0x0007) ],
      [ B1(0),  B1(1),  B32(0x0000), B1(0),  B1(1),  '?'         ],
  ])
| 39.959677
| 65
| 0.50999
| 889
| 4,955
| 2.767154
| 0.091114
| 0.157317
| 0.146341
| 0.102439
| 0.769919
| 0.769512
| 0.766667
| 0.737805
| 0.732927
| 0.692276
| 0
| 0.255981
| 0.240767
| 4,955
| 123
| 66
| 40.284553
| 0.397927
| 0.129162
| 0
| 0.377778
| 0
| 0
| 0.004427
| 0
| 0
| 0
| 0.067102
| 0
| 0.033333
| 1
| 0.088889
| false
| 0.022222
| 0.044444
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
56ef980519620ed2b2323698dcacc6c6d579e42b
| 491
|
py
|
Python
|
python/ql/src/Classes/MaybeUndefinedClassAttribute.py
|
vadi2/codeql
|
a806a4f08696d241ab295a286999251b56a6860c
|
[
"MIT"
] | 4,036
|
2020-04-29T00:09:57.000Z
|
2022-03-31T14:16:38.000Z
|
python/ql/src/Classes/MaybeUndefinedClassAttribute.py
|
vadi2/codeql
|
a806a4f08696d241ab295a286999251b56a6860c
|
[
"MIT"
] | 2,970
|
2020-04-28T17:24:18.000Z
|
2022-03-31T22:40:46.000Z
|
python/ql/src/Classes/MaybeUndefinedClassAttribute.py
|
ScriptBox99/github-codeql
|
2ecf0d3264db8fb4904b2056964da469372a235c
|
[
"MIT"
] | 794
|
2020-04-29T00:28:25.000Z
|
2022-03-30T08:21:46.000Z
|
class Spam:
    # Deliberately broken example for the "maybe undefined class attribute"
    # query: 'eggs' is only assigned in set_eggs, so __str__ can read an
    # attribute that was never set.
    def __init__(self):
        self.spam = 'spam, spam, spam'

    # NOTE(review): 'self' is missing from the parameter list, so calling
    # s.set_eggs(x) binds s to 'eggs' and 'self' is an undefined name.
    # Left unchanged because this class is the intentionally-bad example.
    def set_eggs(eggs):
        self.eggs = eggs

    def __str__(self):
        return '%s and %s' % (self.spam, self.eggs)  # Maybe uninitialized attribute 'eggs'
#Fixed version
class Spam:
    """Fixed version: 'eggs' is initialized in __init__, so __str__ is always safe."""

    def __init__(self):
        self.spam = 'spam, spam, spam'
        self.eggs = None  # ensure the attribute always exists

    def set_eggs(self, eggs):
        # Bug fix: the original omitted 'self', so s.set_eggs(x) raised
        # TypeError and 'self' inside the body was an undefined name.
        self.eggs = eggs

    def __str__(self):
        return '%s and %s' % (self.spam, self.eggs)  # OK
| 18.884615
| 90
| 0.572301
| 66
| 491
| 3.984848
| 0.272727
| 0.18251
| 0.18251
| 0.121673
| 0.78327
| 0.78327
| 0.78327
| 0.78327
| 0.78327
| 0.78327
| 0
| 0
| 0.299389
| 491
| 25
| 91
| 19.64
| 0.764535
| 0.105906
| 0
| 0.933333
| 0
| 0
| 0.115207
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.133333
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 10
|
56f86b642c8ab327dc906048df6e13350acbc852
| 2,394
|
py
|
Python
|
userextensions/migrations/0005_auto_20201114_0548.py
|
davidslusser/django-userprofile
|
f98559f87a4759d8d6047c20a7b35b53ba25cf49
|
[
"Apache-2.0"
] | 2
|
2020-01-29T20:18:44.000Z
|
2020-08-28T16:12:36.000Z
|
userextensions/migrations/0005_auto_20201114_0548.py
|
davidslusser/django-userprofile
|
f98559f87a4759d8d6047c20a7b35b53ba25cf49
|
[
"Apache-2.0"
] | 5
|
2021-05-08T21:40:31.000Z
|
2022-03-10T22:54:50.000Z
|
userextensions/migrations/0005_auto_20201114_0548.py
|
davidslusser/django-userprofile
|
f98559f87a4759d8d6047c20a7b35b53ba25cf49
|
[
"Apache-2.0"
] | 2
|
2020-07-19T00:17:14.000Z
|
2021-04-02T15:42:13.000Z
|
# Generated by Django 2.2.15 on 2020-11-14 05:48
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (see the "Generated by Django" header);
    # edit only by creating a follow-up migration. It adds
    # ServiceAccount.admin_enabled and normalizes created_at/updated_at on
    # every userextensions model to auto_now_add/auto_now.

    dependencies = [
        ('userextensions', '0004_serviceaccount_serviceaccounttokenhistory'),
    ]

    operations = [
        # New field: separate admin on/off switch alongside the owner-level
        # 'enabled' flag below.
        migrations.AddField(
            model_name='serviceaccount',
            name='admin_enabled',
            field=models.BooleanField(default=True, help_text='admin enable/disable state of service account'),
        ),
        migrations.AlterField(
            model_name='serviceaccount',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='serviceaccount',
            name='enabled',
            field=models.BooleanField(default=True, help_text='owner enable/disable state of service account'),
        ),
        migrations.AlterField(
            model_name='serviceaccount',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='theme',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='theme',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='userfavorite',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='userfavorite',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='userpreference',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='userpreference',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='userrecent',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='userrecent',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| 32.351351
| 111
| 0.581454
| 216
| 2,394
| 6.25
| 0.25
| 0.08
| 0.203704
| 0.236296
| 0.820741
| 0.820741
| 0.807407
| 0.746667
| 0.674074
| 0.638519
| 0
| 0.012165
| 0.313283
| 2,394
| 73
| 112
| 32.794521
| 0.809002
| 0.019215
| 0
| 0.820896
| 1
| 0
| 0.173913
| 0.019608
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.014925
| 0
| 0.059701
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
711c3727ba44b63503d33ee15185399c4b8bdf35
| 47,700
|
py
|
Python
|
sdk/python/pulumi_aws_native/frauddetector/_inputs.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 29
|
2021-09-30T19:32:07.000Z
|
2022-03-22T21:06:08.000Z
|
sdk/python/pulumi_aws_native/frauddetector/_inputs.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 232
|
2021-09-30T19:26:26.000Z
|
2022-03-31T23:22:06.000Z
|
sdk/python/pulumi_aws_native/frauddetector/_inputs.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 4
|
2021-11-10T19:42:01.000Z
|
2022-02-05T10:15:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
# Public API of this generated module: one Args input-type class per nested
# object in the AWS FraudDetector resource schemas (Detector, EntityType,
# EventType, Label, Outcome, Variable).
__all__ = [
    'DetectorEntityTypeArgs',
    'DetectorEventTypeArgs',
    'DetectorEventVariableArgs',
    'DetectorLabelArgs',
    'DetectorModelArgs',
    'DetectorOutcomeArgs',
    'DetectorRuleArgs',
    'DetectorTagArgs',
    'EntityTypeTagArgs',
    'EventTypeEntityTypeArgs',
    'EventTypeEventVariableArgs',
    'EventTypeLabelArgs',
    'EventTypeTagArgs',
    'LabelTagArgs',
    'OutcomeTagArgs',
    'VariableTagArgs',
]
@pulumi.input_type
class DetectorEntityTypeArgs:
    """
    An entity type attached to a detector.

    All fields are optional pulumi inputs; only explicitly supplied values
    are recorded on the instance.
    """
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 created_time: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 inline: Optional[pulumi.Input[bool]] = None,
                 last_updated_time: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]] = None):
        """
        :param pulumi.Input[str] created_time: The time when the entity type was created.
        :param pulumi.Input[str] description: The description.
        :param pulumi.Input[str] last_updated_time: The time when the entity type was last updated.
        :param pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]] tags: Tags associated with this entity type.
        """
        # Only store arguments the caller actually supplied, so unset fields
        # remain absent rather than explicitly None.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if created_time is not None:
            pulumi.set(__self__, "created_time", created_time)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if inline is not None:
            pulumi.set(__self__, "inline", inline)
        if last_updated_time is not None:
            pulumi.set(__self__, "last_updated_time", last_updated_time)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    # The ARN of the entity type (undocumented by the generator — inferred
    # from the field name; confirm against the AWS::FraudDetector schema).
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter(name="createdTime")
    def created_time(self) -> Optional[pulumi.Input[str]]:
        """
        The time when the entity type was created.
        """
        return pulumi.get(self, "created_time")
    @created_time.setter
    def created_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created_time", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    # NOTE(review): presumably marks the entity type as defined inline on the
    # detector rather than referenced by name/ARN — confirm.
    @property
    @pulumi.getter
    def inline(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "inline")
    @inline.setter
    def inline(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "inline", value)
    @property
    @pulumi.getter(name="lastUpdatedTime")
    def last_updated_time(self) -> Optional[pulumi.Input[str]]:
        """
        The time when the entity type was last updated.
        """
        return pulumi.get(self, "last_updated_time")
    @last_updated_time.setter
    def last_updated_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_updated_time", value)
    # The name of the entity type (undocumented by the generator).
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]]:
        """
        Tags associated with this entity type.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class DetectorEventTypeArgs:
    """
    The event type a detector evaluates, together with its entity types,
    event variables and labels.
    """
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 created_time: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 entity_types: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorEntityTypeArgs']]]] = None,
                 event_variables: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorEventVariableArgs']]]] = None,
                 inline: Optional[pulumi.Input[bool]] = None,
                 labels: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorLabelArgs']]]] = None,
                 last_updated_time: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]] = None):
        """
        :param pulumi.Input[str] arn: The ARN of the event type.
        :param pulumi.Input[str] created_time: The time when the event type was created.
        :param pulumi.Input[str] description: The description of the event type.
        :param pulumi.Input[str] last_updated_time: The time when the event type was last updated.
        :param pulumi.Input[str] name: The name for the event type
        :param pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]] tags: Tags associated with this event type.
        """
        # Only store arguments the caller actually supplied, so unset fields
        # remain absent rather than explicitly None.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if created_time is not None:
            pulumi.set(__self__, "created_time", created_time)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if entity_types is not None:
            pulumi.set(__self__, "entity_types", entity_types)
        if event_variables is not None:
            pulumi.set(__self__, "event_variables", event_variables)
        if inline is not None:
            pulumi.set(__self__, "inline", inline)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if last_updated_time is not None:
            pulumi.set(__self__, "last_updated_time", last_updated_time)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The ARN of the event type.
        """
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter(name="createdTime")
    def created_time(self) -> Optional[pulumi.Input[str]]:
        """
        The time when the event type was created.
        """
        return pulumi.get(self, "created_time")
    @created_time.setter
    def created_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created_time", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the event type.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    # The entity types used by this event type (undocumented by the
    # generator — inferred from the field name; confirm).
    @property
    @pulumi.getter(name="entityTypes")
    def entity_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DetectorEntityTypeArgs']]]]:
        return pulumi.get(self, "entity_types")
    @entity_types.setter
    def entity_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorEntityTypeArgs']]]]):
        pulumi.set(self, "entity_types", value)
    # The event variables sent with events of this type (undocumented by the
    # generator — inferred from the field name; confirm).
    @property
    @pulumi.getter(name="eventVariables")
    def event_variables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DetectorEventVariableArgs']]]]:
        return pulumi.get(self, "event_variables")
    @event_variables.setter
    def event_variables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorEventVariableArgs']]]]):
        pulumi.set(self, "event_variables", value)
    # NOTE(review): presumably marks the event type as defined inline on the
    # detector rather than referenced by name/ARN — confirm.
    @property
    @pulumi.getter
    def inline(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "inline")
    @inline.setter
    def inline(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "inline", value)
    # The labels associated with this event type (undocumented by the
    # generator — inferred from the field name; confirm).
    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DetectorLabelArgs']]]]:
        return pulumi.get(self, "labels")
    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorLabelArgs']]]]):
        pulumi.set(self, "labels", value)
    @property
    @pulumi.getter(name="lastUpdatedTime")
    def last_updated_time(self) -> Optional[pulumi.Input[str]]:
        """
        The time when the event type was last updated.
        """
        return pulumi.get(self, "last_updated_time")
    @last_updated_time.setter
    def last_updated_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_updated_time", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name for the event type
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]]:
        """
        Tags associated with this event type.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class DetectorEventVariableArgs:
    """
    An event variable used by a detector's event type.

    All fields are optional pulumi inputs; only explicitly supplied values
    are recorded on the instance.
    """
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 created_time: Optional[pulumi.Input[str]] = None,
                 data_source: Optional[pulumi.Input['DetectorEventVariableDataSource']] = None,
                 data_type: Optional[pulumi.Input['DetectorEventVariableDataType']] = None,
                 default_value: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 inline: Optional[pulumi.Input[bool]] = None,
                 last_updated_time: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]] = None,
                 variable_type: Optional[pulumi.Input['DetectorEventVariableVariableType']] = None):
        """
        :param pulumi.Input[str] created_time: The time when the event variable was created.
        :param pulumi.Input[str] description: The description.
        :param pulumi.Input[str] last_updated_time: The time when the event variable was last updated.
        :param pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]] tags: Tags associated with this event variable.
        """
        # Only store arguments the caller actually supplied, so unset fields
        # remain absent rather than explicitly None.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if created_time is not None:
            pulumi.set(__self__, "created_time", created_time)
        if data_source is not None:
            pulumi.set(__self__, "data_source", data_source)
        if data_type is not None:
            pulumi.set(__self__, "data_type", data_type)
        if default_value is not None:
            pulumi.set(__self__, "default_value", default_value)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if inline is not None:
            pulumi.set(__self__, "inline", inline)
        if last_updated_time is not None:
            pulumi.set(__self__, "last_updated_time", last_updated_time)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if variable_type is not None:
            pulumi.set(__self__, "variable_type", variable_type)
    # The ARN of the event variable (undocumented by the generator).
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter(name="createdTime")
    def created_time(self) -> Optional[pulumi.Input[str]]:
        """
        The time when the event variable was created.
        """
        return pulumi.get(self, "created_time")
    @created_time.setter
    def created_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created_time", value)
    # The variable's data source (enum 'DetectorEventVariableDataSource').
    @property
    @pulumi.getter(name="dataSource")
    def data_source(self) -> Optional[pulumi.Input['DetectorEventVariableDataSource']]:
        return pulumi.get(self, "data_source")
    @data_source.setter
    def data_source(self, value: Optional[pulumi.Input['DetectorEventVariableDataSource']]):
        pulumi.set(self, "data_source", value)
    # The variable's data type (enum 'DetectorEventVariableDataType').
    @property
    @pulumi.getter(name="dataType")
    def data_type(self) -> Optional[pulumi.Input['DetectorEventVariableDataType']]:
        return pulumi.get(self, "data_type")
    @data_type.setter
    def data_type(self, value: Optional[pulumi.Input['DetectorEventVariableDataType']]):
        pulumi.set(self, "data_type", value)
    # The variable's default value (undocumented by the generator).
    @property
    @pulumi.getter(name="defaultValue")
    def default_value(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "default_value")
    @default_value.setter
    def default_value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_value", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    # NOTE(review): presumably marks the variable as defined inline on the
    # detector rather than referenced — confirm.
    @property
    @pulumi.getter
    def inline(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "inline")
    @inline.setter
    def inline(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "inline", value)
    @property
    @pulumi.getter(name="lastUpdatedTime")
    def last_updated_time(self) -> Optional[pulumi.Input[str]]:
        """
        The time when the event variable was last updated.
        """
        return pulumi.get(self, "last_updated_time")
    @last_updated_time.setter
    def last_updated_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_updated_time", value)
    # The name of the event variable (undocumented by the generator).
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]]:
        """
        Tags associated with this event variable.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]]):
        pulumi.set(self, "tags", value)
    # The fraud-detector variable type (enum 'DetectorEventVariableVariableType').
    @property
    @pulumi.getter(name="variableType")
    def variable_type(self) -> Optional[pulumi.Input['DetectorEventVariableVariableType']]:
        return pulumi.get(self, "variable_type")
    @variable_type.setter
    def variable_type(self, value: Optional[pulumi.Input['DetectorEventVariableVariableType']]):
        pulumi.set(self, "variable_type", value)
@pulumi.input_type
class DetectorLabelArgs:
    """
    A label used by a detector's event type.

    All fields are optional pulumi inputs; only explicitly supplied values
    are recorded on the instance.
    """
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 created_time: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 inline: Optional[pulumi.Input[bool]] = None,
                 last_updated_time: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]] = None):
        """
        :param pulumi.Input[str] created_time: The time when the label was created.
        :param pulumi.Input[str] description: The description.
        :param pulumi.Input[str] last_updated_time: The time when the label was last updated.
        :param pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]] tags: Tags associated with this label.
        """
        # Only store arguments the caller actually supplied, so unset fields
        # remain absent rather than explicitly None.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if created_time is not None:
            pulumi.set(__self__, "created_time", created_time)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if inline is not None:
            pulumi.set(__self__, "inline", inline)
        if last_updated_time is not None:
            pulumi.set(__self__, "last_updated_time", last_updated_time)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    # The ARN of the label (undocumented by the generator).
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter(name="createdTime")
    def created_time(self) -> Optional[pulumi.Input[str]]:
        """
        The time when the label was created.
        """
        return pulumi.get(self, "created_time")
    @created_time.setter
    def created_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created_time", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    # NOTE(review): presumably marks the label as defined inline on the
    # detector rather than referenced — confirm.
    @property
    @pulumi.getter
    def inline(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "inline")
    @inline.setter
    def inline(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "inline", value)
    @property
    @pulumi.getter(name="lastUpdatedTime")
    def last_updated_time(self) -> Optional[pulumi.Input[str]]:
        """
        The time when the label was last updated.
        """
        return pulumi.get(self, "last_updated_time")
    @last_updated_time.setter
    def last_updated_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_updated_time", value)
    # The name of the label (undocumented by the generator).
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]]:
        """
        Tags associated with this label.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class DetectorModelArgs:
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None):
        """
        A model to associate with a detector.
        """
        # An unset ARN is simply not recorded, mirroring the other input
        # types in this module.
        if arn is None:
            return
        pulumi.set(__self__, "arn", arn)
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """The ARN of the model to associate."""
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, new_arn: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", new_arn)
@pulumi.input_type
class DetectorOutcomeArgs:
    """
    An outcome that a detector's rules can return.

    All fields are optional pulumi inputs; only explicitly supplied values
    are recorded on the instance.
    """
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 created_time: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 inline: Optional[pulumi.Input[bool]] = None,
                 last_updated_time: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]] = None):
        """
        :param pulumi.Input[str] created_time: The time when the outcome was created.
        :param pulumi.Input[str] description: The description.
        :param pulumi.Input[str] last_updated_time: The time when the outcome was last updated.
        :param pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]] tags: Tags associated with this outcome.
        """
        # Only store arguments the caller actually supplied, so unset fields
        # remain absent rather than explicitly None.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if created_time is not None:
            pulumi.set(__self__, "created_time", created_time)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if inline is not None:
            pulumi.set(__self__, "inline", inline)
        if last_updated_time is not None:
            pulumi.set(__self__, "last_updated_time", last_updated_time)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    # The ARN of the outcome (undocumented by the generator).
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter(name="createdTime")
    def created_time(self) -> Optional[pulumi.Input[str]]:
        """
        The time when the outcome was created.
        """
        return pulumi.get(self, "created_time")
    @created_time.setter
    def created_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created_time", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    # NOTE(review): presumably marks the outcome as defined inline on the
    # detector rather than referenced — confirm.
    @property
    @pulumi.getter
    def inline(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "inline")
    @inline.setter
    def inline(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "inline", value)
    @property
    @pulumi.getter(name="lastUpdatedTime")
    def last_updated_time(self) -> Optional[pulumi.Input[str]]:
        """
        The time when the outcome was last updated.
        """
        return pulumi.get(self, "last_updated_time")
    @last_updated_time.setter
    def last_updated_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_updated_time", value)
    # The name of the outcome (undocumented by the generator).
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]]:
        """
        Tags associated with this outcome.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class DetectorRuleArgs:
    """
    A rule evaluated by a detector, with its expression and outcomes.

    All fields are optional pulumi inputs; only explicitly supplied values
    are recorded on the instance.
    """
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 created_time: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 detector_id: Optional[pulumi.Input[str]] = None,
                 expression: Optional[pulumi.Input[str]] = None,
                 language: Optional[pulumi.Input['DetectorRuleLanguage']] = None,
                 last_updated_time: Optional[pulumi.Input[str]] = None,
                 outcomes: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorOutcomeArgs']]]] = None,
                 rule_id: Optional[pulumi.Input[str]] = None,
                 rule_version: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]] = None):
        """
        :param pulumi.Input[str] created_time: The time when the rule was created.
        :param pulumi.Input[str] description: The description.
        :param pulumi.Input[str] last_updated_time: The time when the rule was last updated.
        :param pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]] tags: Tags associated with this rule.
        """
        # Only store arguments the caller actually supplied, so unset fields
        # remain absent rather than explicitly None.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if created_time is not None:
            pulumi.set(__self__, "created_time", created_time)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if detector_id is not None:
            pulumi.set(__self__, "detector_id", detector_id)
        if expression is not None:
            pulumi.set(__self__, "expression", expression)
        if language is not None:
            pulumi.set(__self__, "language", language)
        if last_updated_time is not None:
            pulumi.set(__self__, "last_updated_time", last_updated_time)
        if outcomes is not None:
            pulumi.set(__self__, "outcomes", outcomes)
        if rule_id is not None:
            pulumi.set(__self__, "rule_id", rule_id)
        if rule_version is not None:
            pulumi.set(__self__, "rule_version", rule_version)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    # The ARN of the rule (undocumented by the generator).
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter(name="createdTime")
    def created_time(self) -> Optional[pulumi.Input[str]]:
        """
        The time when the rule was created.
        """
        return pulumi.get(self, "created_time")
    @created_time.setter
    def created_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created_time", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    # The ID of the detector this rule belongs to — presumed from the name;
    # confirm against the AWS::FraudDetector schema.
    @property
    @pulumi.getter(name="detectorId")
    def detector_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "detector_id")
    @detector_id.setter
    def detector_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "detector_id", value)
    # The rule expression (undocumented by the generator).
    @property
    @pulumi.getter
    def expression(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "expression")
    @expression.setter
    def expression(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "expression", value)
    # The language of the rule expression (enum 'DetectorRuleLanguage').
    @property
    @pulumi.getter
    def language(self) -> Optional[pulumi.Input['DetectorRuleLanguage']]:
        return pulumi.get(self, "language")
    @language.setter
    def language(self, value: Optional[pulumi.Input['DetectorRuleLanguage']]):
        pulumi.set(self, "language", value)
    @property
    @pulumi.getter(name="lastUpdatedTime")
    def last_updated_time(self) -> Optional[pulumi.Input[str]]:
        """
        The time when the rule was last updated.
        """
        return pulumi.get(self, "last_updated_time")
    @last_updated_time.setter
    def last_updated_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_updated_time", value)
    # The outcomes returned when this rule matches — presumed from the name;
    # confirm.
    @property
    @pulumi.getter
    def outcomes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DetectorOutcomeArgs']]]]:
        return pulumi.get(self, "outcomes")
    @outcomes.setter
    def outcomes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorOutcomeArgs']]]]):
        pulumi.set(self, "outcomes", value)
    # The rule's identifier (undocumented by the generator).
    @property
    @pulumi.getter(name="ruleId")
    def rule_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "rule_id")
    @rule_id.setter
    def rule_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "rule_id", value)
    # The rule's version (undocumented by the generator).
    @property
    @pulumi.getter(name="ruleVersion")
    def rule_version(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "rule_version")
    @rule_version.setter
    def rule_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "rule_version", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]]:
        """
        Tags associated with this rule.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorTagArgs']]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class DetectorTagArgs:
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """A required key/value tag applied to a detector resource."""
        # Both members are mandatory; record them in declaration order.
        for attr, supplied in (("key", key), ("value", value)):
            pulumi.set(__self__, attr, supplied)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """The tag's key."""
        return pulumi.get(self, "key")
    @key.setter
    def key(self, new_key: pulumi.Input[str]):
        pulumi.set(self, "key", new_key)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """The tag's value."""
        return pulumi.get(self, "value")
    @value.setter
    def value(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "value", new_value)
@pulumi.input_type
class EntityTypeTagArgs:
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """A required key/value tag applied to an entity type resource."""
        # Both members are mandatory inputs.
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """The tag's key."""
        return pulumi.get(self, "key")
    @key.setter
    def key(self, updated: pulumi.Input[str]):
        pulumi.set(self, "key", updated)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """The tag's value."""
        return pulumi.get(self, "value")
    @value.setter
    def value(self, updated: pulumi.Input[str]):
        pulumi.set(self, "value", updated)
@pulumi.input_type
class EventTypeEntityTypeArgs:
    """
    An entity type nested under an event type resource.

    All fields are optional pulumi inputs; only explicitly supplied values
    are recorded on the instance.
    """
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 created_time: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 inline: Optional[pulumi.Input[bool]] = None,
                 last_updated_time: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['EventTypeTagArgs']]]] = None):
        """
        :param pulumi.Input[str] created_time: The time when the entity type was created.
        :param pulumi.Input[str] description: The description.
        :param pulumi.Input[str] last_updated_time: The time when the entity type was last updated.
        :param pulumi.Input[Sequence[pulumi.Input['EventTypeTagArgs']]] tags: Tags associated with this entity type.
        """
        # Only store arguments the caller actually supplied, so unset fields
        # remain absent rather than explicitly None.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if created_time is not None:
            pulumi.set(__self__, "created_time", created_time)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if inline is not None:
            pulumi.set(__self__, "inline", inline)
        if last_updated_time is not None:
            pulumi.set(__self__, "last_updated_time", last_updated_time)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    # The ARN of the entity type (undocumented by the generator).
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter(name="createdTime")
    def created_time(self) -> Optional[pulumi.Input[str]]:
        """
        The time when the entity type was created.
        """
        return pulumi.get(self, "created_time")
    @created_time.setter
    def created_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created_time", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    # NOTE(review): presumably marks the entity type as defined inline on the
    # event type rather than referenced — confirm.
    @property
    @pulumi.getter
    def inline(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "inline")
    @inline.setter
    def inline(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "inline", value)
    @property
    @pulumi.getter(name="lastUpdatedTime")
    def last_updated_time(self) -> Optional[pulumi.Input[str]]:
        """
        The time when the entity type was last updated.
        """
        return pulumi.get(self, "last_updated_time")
    @last_updated_time.setter
    def last_updated_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_updated_time", value)
    # The name of the entity type (undocumented by the generator).
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EventTypeTagArgs']]]]:
        """
        Tags associated with this entity type.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EventTypeTagArgs']]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class EventTypeEventVariableArgs:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
created_time: Optional[pulumi.Input[str]] = None,
data_source: Optional[pulumi.Input['EventTypeEventVariableDataSource']] = None,
data_type: Optional[pulumi.Input['EventTypeEventVariableDataType']] = None,
default_value: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
inline: Optional[pulumi.Input[bool]] = None,
last_updated_time: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['EventTypeTagArgs']]]] = None,
variable_type: Optional[pulumi.Input['EventTypeEventVariableVariableType']] = None):
"""
:param pulumi.Input[str] created_time: The time when the event type was created.
:param pulumi.Input[str] description: The description.
:param pulumi.Input[str] last_updated_time: The time when the event type was last updated.
:param pulumi.Input[Sequence[pulumi.Input['EventTypeTagArgs']]] tags: Tags associated with this event type.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if created_time is not None:
pulumi.set(__self__, "created_time", created_time)
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
if data_type is not None:
pulumi.set(__self__, "data_type", data_type)
if default_value is not None:
pulumi.set(__self__, "default_value", default_value)
if description is not None:
pulumi.set(__self__, "description", description)
if inline is not None:
pulumi.set(__self__, "inline", inline)
if last_updated_time is not None:
pulumi.set(__self__, "last_updated_time", last_updated_time)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if variable_type is not None:
pulumi.set(__self__, "variable_type", variable_type)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> Optional[pulumi.Input[str]]:
"""
The time when the event type was created.
"""
return pulumi.get(self, "created_time")
@created_time.setter
def created_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_time", value)
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[pulumi.Input['EventTypeEventVariableDataSource']]:
return pulumi.get(self, "data_source")
@data_source.setter
def data_source(self, value: Optional[pulumi.Input['EventTypeEventVariableDataSource']]):
pulumi.set(self, "data_source", value)
@property
@pulumi.getter(name="dataType")
def data_type(self) -> Optional[pulumi.Input['EventTypeEventVariableDataType']]:
return pulumi.get(self, "data_type")
@data_type.setter
def data_type(self, value: Optional[pulumi.Input['EventTypeEventVariableDataType']]):
pulumi.set(self, "data_type", value)
@property
@pulumi.getter(name="defaultValue")
def default_value(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "default_value")
@default_value.setter
def default_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_value", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def inline(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "inline")
@inline.setter
def inline(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "inline", value)
@property
@pulumi.getter(name="lastUpdatedTime")
def last_updated_time(self) -> Optional[pulumi.Input[str]]:
"""
The time when the event type was last updated.
"""
return pulumi.get(self, "last_updated_time")
@last_updated_time.setter
def last_updated_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_updated_time", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EventTypeTagArgs']]]]:
"""
Tags associated with this event type.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EventTypeTagArgs']]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="variableType")
def variable_type(self) -> Optional[pulumi.Input['EventTypeEventVariableVariableType']]:
return pulumi.get(self, "variable_type")
@variable_type.setter
def variable_type(self, value: Optional[pulumi.Input['EventTypeEventVariableVariableType']]):
pulumi.set(self, "variable_type", value)
@pulumi.input_type
class EventTypeLabelArgs:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
created_time: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
inline: Optional[pulumi.Input[bool]] = None,
last_updated_time: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['EventTypeTagArgs']]]] = None):
"""
:param pulumi.Input[str] created_time: The time when the event type was created.
:param pulumi.Input[str] description: The description.
:param pulumi.Input[str] last_updated_time: The time when the event type was last updated.
:param pulumi.Input[Sequence[pulumi.Input['EventTypeTagArgs']]] tags: Tags associated with this event type.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if created_time is not None:
pulumi.set(__self__, "created_time", created_time)
if description is not None:
pulumi.set(__self__, "description", description)
if inline is not None:
pulumi.set(__self__, "inline", inline)
if last_updated_time is not None:
pulumi.set(__self__, "last_updated_time", last_updated_time)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> Optional[pulumi.Input[str]]:
"""
The time when the event type was created.
"""
return pulumi.get(self, "created_time")
@created_time.setter
def created_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_time", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def inline(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "inline")
@inline.setter
def inline(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "inline", value)
@property
@pulumi.getter(name="lastUpdatedTime")
def last_updated_time(self) -> Optional[pulumi.Input[str]]:
"""
The time when the event type was last updated.
"""
return pulumi.get(self, "last_updated_time")
@last_updated_time.setter
def last_updated_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_updated_time", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EventTypeTagArgs']]]]:
"""
Tags associated with this event type.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EventTypeTagArgs']]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class EventTypeTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class LabelTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class OutcomeTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class VariableTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
| 34.970674
| 118
| 0.627778
| 5,533
| 47,700
| 5.249232
| 0.023315
| 0.142026
| 0.155041
| 0.115893
| 0.919467
| 0.90177
| 0.888548
| 0.864172
| 0.854875
| 0.848402
| 0
| 0.000028
| 0.244046
| 47,700
| 1,363
| 119
| 34.996332
| 0.80543
| 0.098407
| 0
| 0.857732
| 1
| 0
| 0.099796
| 0.019801
| 0
| 0
| 0
| 0
| 0
| 1
| 0.204124
| false
| 0
| 0.006186
| 0.054639
| 0.320619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
85729873a942c7b96c7c25e93e815b2628d4e959
| 6,545
|
py
|
Python
|
loldib/getratings/models/NA/na_khazix/na_khazix_mid.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_khazix/na_khazix_mid.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_khazix/na_khazix_mid.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
class NA_Khazix_Mid_Aatrox(Ratings):
pass
class NA_Khazix_Mid_Ahri(Ratings):
pass
class NA_Khazix_Mid_Akali(Ratings):
pass
class NA_Khazix_Mid_Alistar(Ratings):
pass
class NA_Khazix_Mid_Amumu(Ratings):
pass
class NA_Khazix_Mid_Anivia(Ratings):
pass
class NA_Khazix_Mid_Annie(Ratings):
pass
class NA_Khazix_Mid_Ashe(Ratings):
pass
class NA_Khazix_Mid_AurelionSol(Ratings):
pass
class NA_Khazix_Mid_Azir(Ratings):
pass
class NA_Khazix_Mid_Bard(Ratings):
pass
class NA_Khazix_Mid_Blitzcrank(Ratings):
pass
class NA_Khazix_Mid_Brand(Ratings):
pass
class NA_Khazix_Mid_Braum(Ratings):
pass
class NA_Khazix_Mid_Caitlyn(Ratings):
pass
class NA_Khazix_Mid_Camille(Ratings):
pass
class NA_Khazix_Mid_Cassiopeia(Ratings):
pass
class NA_Khazix_Mid_Chogath(Ratings):
pass
class NA_Khazix_Mid_Corki(Ratings):
pass
class NA_Khazix_Mid_Darius(Ratings):
pass
class NA_Khazix_Mid_Diana(Ratings):
pass
class NA_Khazix_Mid_Draven(Ratings):
pass
class NA_Khazix_Mid_DrMundo(Ratings):
pass
class NA_Khazix_Mid_Ekko(Ratings):
pass
class NA_Khazix_Mid_Elise(Ratings):
pass
class NA_Khazix_Mid_Evelynn(Ratings):
pass
class NA_Khazix_Mid_Ezreal(Ratings):
pass
class NA_Khazix_Mid_Fiddlesticks(Ratings):
pass
class NA_Khazix_Mid_Fiora(Ratings):
pass
class NA_Khazix_Mid_Fizz(Ratings):
pass
class NA_Khazix_Mid_Galio(Ratings):
pass
class NA_Khazix_Mid_Gangplank(Ratings):
pass
class NA_Khazix_Mid_Garen(Ratings):
pass
class NA_Khazix_Mid_Gnar(Ratings):
pass
class NA_Khazix_Mid_Gragas(Ratings):
pass
class NA_Khazix_Mid_Graves(Ratings):
pass
class NA_Khazix_Mid_Hecarim(Ratings):
pass
class NA_Khazix_Mid_Heimerdinger(Ratings):
pass
class NA_Khazix_Mid_Illaoi(Ratings):
pass
class NA_Khazix_Mid_Irelia(Ratings):
pass
class NA_Khazix_Mid_Ivern(Ratings):
pass
class NA_Khazix_Mid_Janna(Ratings):
pass
class NA_Khazix_Mid_JarvanIV(Ratings):
pass
class NA_Khazix_Mid_Jax(Ratings):
pass
class NA_Khazix_Mid_Jayce(Ratings):
pass
class NA_Khazix_Mid_Jhin(Ratings):
pass
class NA_Khazix_Mid_Jinx(Ratings):
pass
class NA_Khazix_Mid_Kalista(Ratings):
pass
class NA_Khazix_Mid_Karma(Ratings):
pass
class NA_Khazix_Mid_Karthus(Ratings):
pass
class NA_Khazix_Mid_Kassadin(Ratings):
pass
class NA_Khazix_Mid_Katarina(Ratings):
pass
class NA_Khazix_Mid_Kayle(Ratings):
pass
class NA_Khazix_Mid_Kayn(Ratings):
pass
class NA_Khazix_Mid_Kennen(Ratings):
pass
class NA_Khazix_Mid_Khazix(Ratings):
pass
class NA_Khazix_Mid_Kindred(Ratings):
pass
class NA_Khazix_Mid_Kled(Ratings):
pass
class NA_Khazix_Mid_KogMaw(Ratings):
pass
class NA_Khazix_Mid_Leblanc(Ratings):
pass
class NA_Khazix_Mid_LeeSin(Ratings):
pass
class NA_Khazix_Mid_Leona(Ratings):
pass
class NA_Khazix_Mid_Lissandra(Ratings):
pass
class NA_Khazix_Mid_Lucian(Ratings):
pass
class NA_Khazix_Mid_Lulu(Ratings):
pass
class NA_Khazix_Mid_Lux(Ratings):
pass
class NA_Khazix_Mid_Malphite(Ratings):
pass
class NA_Khazix_Mid_Malzahar(Ratings):
pass
class NA_Khazix_Mid_Maokai(Ratings):
pass
class NA_Khazix_Mid_MasterYi(Ratings):
pass
class NA_Khazix_Mid_MissFortune(Ratings):
pass
class NA_Khazix_Mid_MonkeyKing(Ratings):
pass
class NA_Khazix_Mid_Mordekaiser(Ratings):
pass
class NA_Khazix_Mid_Morgana(Ratings):
pass
class NA_Khazix_Mid_Nami(Ratings):
pass
class NA_Khazix_Mid_Nasus(Ratings):
pass
class NA_Khazix_Mid_Nautilus(Ratings):
pass
class NA_Khazix_Mid_Nidalee(Ratings):
pass
class NA_Khazix_Mid_Nocturne(Ratings):
pass
class NA_Khazix_Mid_Nunu(Ratings):
pass
class NA_Khazix_Mid_Olaf(Ratings):
pass
class NA_Khazix_Mid_Orianna(Ratings):
pass
class NA_Khazix_Mid_Ornn(Ratings):
pass
class NA_Khazix_Mid_Pantheon(Ratings):
pass
class NA_Khazix_Mid_Poppy(Ratings):
pass
class NA_Khazix_Mid_Quinn(Ratings):
pass
class NA_Khazix_Mid_Rakan(Ratings):
pass
class NA_Khazix_Mid_Rammus(Ratings):
pass
class NA_Khazix_Mid_RekSai(Ratings):
pass
class NA_Khazix_Mid_Renekton(Ratings):
pass
class NA_Khazix_Mid_Rengar(Ratings):
pass
class NA_Khazix_Mid_Riven(Ratings):
pass
class NA_Khazix_Mid_Rumble(Ratings):
pass
class NA_Khazix_Mid_Ryze(Ratings):
pass
class NA_Khazix_Mid_Sejuani(Ratings):
pass
class NA_Khazix_Mid_Shaco(Ratings):
pass
class NA_Khazix_Mid_Shen(Ratings):
pass
class NA_Khazix_Mid_Shyvana(Ratings):
pass
class NA_Khazix_Mid_Singed(Ratings):
pass
class NA_Khazix_Mid_Sion(Ratings):
pass
class NA_Khazix_Mid_Sivir(Ratings):
pass
class NA_Khazix_Mid_Skarner(Ratings):
pass
class NA_Khazix_Mid_Sona(Ratings):
pass
class NA_Khazix_Mid_Soraka(Ratings):
pass
class NA_Khazix_Mid_Swain(Ratings):
pass
class NA_Khazix_Mid_Syndra(Ratings):
pass
class NA_Khazix_Mid_TahmKench(Ratings):
pass
class NA_Khazix_Mid_Taliyah(Ratings):
pass
class NA_Khazix_Mid_Talon(Ratings):
pass
class NA_Khazix_Mid_Taric(Ratings):
pass
class NA_Khazix_Mid_Teemo(Ratings):
pass
class NA_Khazix_Mid_Thresh(Ratings):
pass
class NA_Khazix_Mid_Tristana(Ratings):
pass
class NA_Khazix_Mid_Trundle(Ratings):
pass
class NA_Khazix_Mid_Tryndamere(Ratings):
pass
class NA_Khazix_Mid_TwistedFate(Ratings):
pass
class NA_Khazix_Mid_Twitch(Ratings):
pass
class NA_Khazix_Mid_Udyr(Ratings):
pass
class NA_Khazix_Mid_Urgot(Ratings):
pass
class NA_Khazix_Mid_Varus(Ratings):
pass
class NA_Khazix_Mid_Vayne(Ratings):
pass
class NA_Khazix_Mid_Veigar(Ratings):
pass
class NA_Khazix_Mid_Velkoz(Ratings):
pass
class NA_Khazix_Mid_Vi(Ratings):
pass
class NA_Khazix_Mid_Viktor(Ratings):
pass
class NA_Khazix_Mid_Vladimir(Ratings):
pass
class NA_Khazix_Mid_Volibear(Ratings):
pass
class NA_Khazix_Mid_Warwick(Ratings):
pass
class NA_Khazix_Mid_Xayah(Ratings):
pass
class NA_Khazix_Mid_Xerath(Ratings):
pass
class NA_Khazix_Mid_XinZhao(Ratings):
pass
class NA_Khazix_Mid_Yasuo(Ratings):
pass
class NA_Khazix_Mid_Yorick(Ratings):
pass
class NA_Khazix_Mid_Zac(Ratings):
pass
class NA_Khazix_Mid_Zed(Ratings):
pass
class NA_Khazix_Mid_Ziggs(Ratings):
pass
class NA_Khazix_Mid_Zilean(Ratings):
pass
class NA_Khazix_Mid_Zyra(Ratings):
pass
| 15.695444
| 46
| 0.766692
| 972
| 6,545
| 4.736626
| 0.151235
| 0.209818
| 0.389661
| 0.479583
| 0.803432
| 0.803432
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169748
| 6,545
| 416
| 47
| 15.733173
| 0.847258
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 8
|
85821392c09e1139f86fefd3c8ac45be23741603
| 194
|
py
|
Python
|
bdbc/lib/python3.5/site-packages/cryptoconditions/lib/__init__.py
|
entropyx/fiduchain-blockchain-interface
|
07336a5eebfaa9cddb148edb94461a8fd57562b1
|
[
"MIT"
] | null | null | null |
bdbc/lib/python3.5/site-packages/cryptoconditions/lib/__init__.py
|
entropyx/fiduchain-blockchain-interface
|
07336a5eebfaa9cddb148edb94461a8fd57562b1
|
[
"MIT"
] | null | null | null |
bdbc/lib/python3.5/site-packages/cryptoconditions/lib/__init__.py
|
entropyx/fiduchain-blockchain-interface
|
07336a5eebfaa9cddb148edb94461a8fd57562b1
|
[
"MIT"
] | null | null | null |
from cryptoconditions.lib.hasher import Hasher
from cryptoconditions.lib.writer import Writer
from cryptoconditions.lib.reader import Reader
from cryptoconditions.lib.predictor import Predictor
| 38.8
| 52
| 0.876289
| 24
| 194
| 7.083333
| 0.333333
| 0.470588
| 0.541176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082474
| 194
| 4
| 53
| 48.5
| 0.955056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
85a7738bb1ef52179bb19322f4c8baeca9533e96
| 216
|
py
|
Python
|
rh_pathfinding/src/rh_pathfinding/engine/geometry/obstacle/__init__.py
|
RhinohawkUAV/rh_ros
|
e13077060bdfcc231adee9731ebfddadcd8d6b4a
|
[
"MIT"
] | 4
|
2020-05-13T19:34:27.000Z
|
2021-09-20T09:01:10.000Z
|
rh_pathfinding/src/rh_pathfinding/engine/geometry/obstacle/__init__.py
|
RhinohawkUAV/rh_ros
|
e13077060bdfcc231adee9731ebfddadcd8d6b4a
|
[
"MIT"
] | null | null | null |
rh_pathfinding/src/rh_pathfinding/engine/geometry/obstacle/__init__.py
|
RhinohawkUAV/rh_ros
|
e13077060bdfcc231adee9731ebfddadcd8d6b4a
|
[
"MIT"
] | 2
|
2019-09-14T14:45:09.000Z
|
2020-11-22T01:46:59.000Z
|
from arcFinder import ArcSegmentFinder
from intersectionDetector import *
from lineFinder import LineSegmentFinder
from obstacleCourse import ObstacleCourse
import obstacleCourse
from pathSegment import PathSegment
| 27
| 41
| 0.888889
| 21
| 216
| 9.142857
| 0.428571
| 0.208333
| 0.354167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106481
| 216
| 7
| 42
| 30.857143
| 0.994819
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a44960bd85f4ff25799e7078bbe153539f1243ba
| 266,820
|
py
|
Python
|
pyboto3/autoscaling.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 91
|
2016-12-31T11:38:37.000Z
|
2021-09-16T19:33:23.000Z
|
pyboto3/autoscaling.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 7
|
2017-01-02T18:54:23.000Z
|
2020-08-11T13:54:02.000Z
|
pyboto3/autoscaling.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 26
|
2016-12-31T13:11:00.000Z
|
2022-03-03T21:01:12.000Z
|
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def attach_instances(InstanceIds=None, AutoScalingGroupName=None):
"""
Attaches one or more EC2 instances to the specified Auto Scaling group.
When you attach instances, Amazon EC2 Auto Scaling increases the desired capacity of the group by the number of instances being attached. If the number of instances being attached plus the desired capacity of the group exceeds the maximum size of the group, the operation fails.
If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with the load balancer. If there are target groups attached to your Auto Scaling group, the instances are also registered with the target groups.
For more information, see Attach EC2 Instances to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide .
See also: AWS API Documentation
Exceptions
Examples
This example attaches the specified instance to the specified Auto Scaling group.
Expected Output:
:example: response = client.attach_instances(
InstanceIds=[
'string',
],
AutoScalingGroupName='string'
)
:type InstanceIds: list
:param InstanceIds: The IDs of the instances. You can specify up to 20 instances.\n\n(string) --\n\n
:type AutoScalingGroupName: string
:param AutoScalingGroupName: [REQUIRED]\nThe name of the Auto Scaling group.\n
:return: response = client.attach_instances(
AutoScalingGroupName='my-auto-scaling-group',
InstanceIds=[
'i-93633f9b',
],
)
print(response)
:returns:
AutoScaling.Client.exceptions.ResourceContentionFault
AutoScaling.Client.exceptions.ServiceLinkedRoleFailure
"""
pass
def attach_load_balancer_target_groups(AutoScalingGroupName=None, TargetGroupARNs=None):
"""
Attaches one or more target groups to the specified Auto Scaling group.
To describe the target groups for an Auto Scaling group, call the DescribeLoadBalancerTargetGroups API. To detach the target group from the Auto Scaling group, call the DetachLoadBalancerTargetGroups API.
With Application Load Balancers and Network Load Balancers, instances are registered as targets with a target group. With Classic Load Balancers, instances are registered with the load balancer. For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide .
See also: AWS API Documentation
Exceptions
Examples
This example attaches the specified target group to the specified Auto Scaling group.
Expected Output:
:example: response = client.attach_load_balancer_target_groups(
AutoScalingGroupName='string',
TargetGroupARNs=[
'string',
]
)
:type AutoScalingGroupName: string
:param AutoScalingGroupName: [REQUIRED]\nThe name of the Auto Scaling group.\n
:type TargetGroupARNs: list
:param TargetGroupARNs: [REQUIRED]\nThe Amazon Resource Names (ARN) of the target groups. You can specify up to 10 target groups.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
AutoScaling.Client.exceptions.ResourceContentionFault
AutoScaling.Client.exceptions.ServiceLinkedRoleFailure
Examples
This example attaches the specified target group to the specified Auto Scaling group.
response = client.attach_load_balancer_target_groups(
AutoScalingGroupName='my-auto-scaling-group',
TargetGroupARNs=[
'arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067',
],
)
print(response)
Expected Output:
{
'ResponseMetadata': {
'...': '...',
},
}
:return: {}
:returns:
(dict) --
"""
pass
def attach_load_balancers(AutoScalingGroupName=None, LoadBalancerNames=None):
"""
Attaches one or more Classic Load Balancers to the specified Auto Scaling group. Amazon EC2 Auto Scaling registers the running instances with these Classic Load Balancers.
To describe the load balancers for an Auto Scaling group, call the DescribeLoadBalancers API. To detach the load balancer from the Auto Scaling group, call the DetachLoadBalancers API.
For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide .
See also: AWS API Documentation
Exceptions
Examples
This example attaches the specified load balancer to the specified Auto Scaling group.
Expected Output:
:example: response = client.attach_load_balancers(
AutoScalingGroupName='string',
LoadBalancerNames=[
'string',
]
)
:type AutoScalingGroupName: string
:param AutoScalingGroupName: [REQUIRED]\nThe name of the Auto Scaling group.\n
:type LoadBalancerNames: list
:param LoadBalancerNames: [REQUIRED]\nThe names of the load balancers. You can specify up to 10 load balancers.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
AutoScaling.Client.exceptions.ResourceContentionFault
AutoScaling.Client.exceptions.ServiceLinkedRoleFailure
Examples
This example attaches the specified load balancer to the specified Auto Scaling group.
response = client.attach_load_balancers(
AutoScalingGroupName='my-auto-scaling-group',
LoadBalancerNames=[
'my-load-balancer',
],
)
print(response)
Expected Output:
{
'ResponseMetadata': {
'...': '...',
},
}
:return: {}
:returns:
(dict) --
"""
pass
def batch_delete_scheduled_action(AutoScalingGroupName=None, ScheduledActionNames=None):
"""
Deletes one or more scheduled actions for the specified Auto Scaling group.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_delete_scheduled_action(
AutoScalingGroupName='string',
ScheduledActionNames=[
'string',
]
)
:type AutoScalingGroupName: string
:param AutoScalingGroupName: [REQUIRED]\nThe name of the Auto Scaling group.\n
:type ScheduledActionNames: list
:param ScheduledActionNames: [REQUIRED]\nThe names of the scheduled actions to delete. The maximum number allowed is 50.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'FailedScheduledActions': [
{
'ScheduledActionName': 'string',
'ErrorCode': 'string',
'ErrorMessage': 'string'
},
]
}
Response Structure
(dict) --
FailedScheduledActions (list) --
The names of the scheduled actions that could not be deleted, including an error message.
(dict) --
Describes a scheduled action that could not be created, updated, or deleted.
ScheduledActionName (string) --
The name of the scheduled action.
ErrorCode (string) --
The error code.
ErrorMessage (string) --
The error message accompanying the error code.
Exceptions
AutoScaling.Client.exceptions.ResourceContentionFault
:return: {
'FailedScheduledActions': [
{
'ScheduledActionName': 'string',
'ErrorCode': 'string',
'ErrorMessage': 'string'
},
]
}
:returns:
AutoScaling.Client.exceptions.ResourceContentionFault
"""
pass
def batch_put_scheduled_update_group_action(AutoScalingGroupName=None, ScheduledUpdateGroupActions=None):
"""
Creates or updates one or more scheduled scaling actions for an Auto Scaling group. If you leave a parameter unspecified when updating a scheduled scaling action, the corresponding value remains unchanged.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_put_scheduled_update_group_action(
AutoScalingGroupName='string',
ScheduledUpdateGroupActions=[
{
'ScheduledActionName': 'string',
'StartTime': datetime(2015, 1, 1),
'EndTime': datetime(2015, 1, 1),
'Recurrence': 'string',
'MinSize': 123,
'MaxSize': 123,
'DesiredCapacity': 123
},
]
)
:type AutoScalingGroupName: string
:param AutoScalingGroupName: [REQUIRED]\nThe name of the Auto Scaling group.\n
:type ScheduledUpdateGroupActions: list
:param ScheduledUpdateGroupActions: [REQUIRED]\nOne or more scheduled actions. The maximum number allowed is 50.\n\n(dict) --Describes information used for one or more scheduled scaling action updates in a BatchPutScheduledUpdateGroupAction operation.\nWhen updating a scheduled scaling action, all optional parameters are left unchanged if not specified.\n\nScheduledActionName (string) -- [REQUIRED]The name of the scaling action.\n\nStartTime (datetime) --The date and time for the action to start, in YYYY-MM-DDThh:mm:ssZ format in UTC/GMT only and in quotes (for example, '2019-06-01T00:00:00Z' ).\nIf you specify Recurrence and StartTime , Amazon EC2 Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence.\nIf you try to schedule the action in the past, Amazon EC2 Auto Scaling returns an error message.\n\nEndTime (datetime) --The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.\n\nRecurrence (string) --The recurring schedule for the action, in Unix cron syntax format. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, '30 0 1 1,6,12 *' ). For more information about this format, see Crontab .\nWhen StartTime and EndTime are specified with Recurrence , they form the boundaries of when the recurring action starts and stops.\n\nMinSize (integer) --The minimum size of the Auto Scaling group.\n\nMaxSize (integer) --The maximum size of the Auto Scaling group.\n\nDesiredCapacity (integer) --The desired capacity is the initial capacity of the Auto Scaling group after the scheduled action runs and the capacity it attempts to maintain.\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'FailedScheduledUpdateGroupActions': [
{
'ScheduledActionName': 'string',
'ErrorCode': 'string',
'ErrorMessage': 'string'
},
]
}
Response Structure
(dict) --
FailedScheduledUpdateGroupActions (list) --
The names of the scheduled actions that could not be created or updated, including an error message.
(dict) --
Describes a scheduled action that could not be created, updated, or deleted.
ScheduledActionName (string) --
The name of the scheduled action.
ErrorCode (string) --
The error code.
ErrorMessage (string) --
The error message accompanying the error code.
Exceptions
AutoScaling.Client.exceptions.AlreadyExistsFault
AutoScaling.Client.exceptions.LimitExceededFault
AutoScaling.Client.exceptions.ResourceContentionFault
:return: {
'FailedScheduledUpdateGroupActions': [
{
'ScheduledActionName': 'string',
'ErrorCode': 'string',
'ErrorMessage': 'string'
},
]
}
:returns:
AutoScaling.Client.exceptions.AlreadyExistsFault
AutoScaling.Client.exceptions.LimitExceededFault
AutoScaling.Client.exceptions.ResourceContentionFault
"""
pass
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
"""
pass
def complete_lifecycle_action(LifecycleHookName=None, AutoScalingGroupName=None, LifecycleActionToken=None, LifecycleActionResult=None, InstanceId=None):
"""
Completes the lifecycle action for the specified token or instance with the specified result.
This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide .
See also: AWS API Documentation
Exceptions
Examples
This example notifies Auto Scaling that the specified lifecycle action is complete so that it can finish launching or terminating the instance.
Expected Output:
:example: response = client.complete_lifecycle_action(
LifecycleHookName='string',
AutoScalingGroupName='string',
LifecycleActionToken='string',
LifecycleActionResult='string',
InstanceId='string'
)
:type LifecycleHookName: string
:param LifecycleHookName: [REQUIRED]\nThe name of the lifecycle hook.\n
:type AutoScalingGroupName: string
:param AutoScalingGroupName: [REQUIRED]\nThe name of the Auto Scaling group.\n
:type LifecycleActionToken: string
:param LifecycleActionToken: A universally unique identifier (UUID) that identifies a specific lifecycle action associated with an instance. Amazon EC2 Auto Scaling sends this token to the notification target you specified when you created the lifecycle hook.
:type LifecycleActionResult: string
:param LifecycleActionResult: [REQUIRED]\nThe action for the group to take. This parameter can be either CONTINUE or ABANDON .\n
:type InstanceId: string
:param InstanceId: The ID of the instance.
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
AutoScaling.Client.exceptions.ResourceContentionFault
Examples
This example notifies Auto Scaling that the specified lifecycle action is complete so that it can finish launching or terminating the instance.
response = client.complete_lifecycle_action(
AutoScalingGroupName='my-auto-scaling-group',
LifecycleActionResult='CONTINUE',
LifecycleActionToken='bcd2f1b8-9a78-44d3-8a7a-4dd07d7cf635',
LifecycleHookName='my-lifecycle-hook',
)
print(response)
Expected Output:
{
'ResponseMetadata': {
'...': '...',
},
}
:return: {}
:returns:
LifecycleHookName (string) -- [REQUIRED]
The name of the lifecycle hook.
AutoScalingGroupName (string) -- [REQUIRED]
The name of the Auto Scaling group.
LifecycleActionToken (string) -- A universally unique identifier (UUID) that identifies a specific lifecycle action associated with an instance. Amazon EC2 Auto Scaling sends this token to the notification target you specified when you created the lifecycle hook.
LifecycleActionResult (string) -- [REQUIRED]
The action for the group to take. This parameter can be either CONTINUE or ABANDON .
InstanceId (string) -- The ID of the instance.
"""
pass
def create_auto_scaling_group(AutoScalingGroupName=None, LaunchConfigurationName=None, LaunchTemplate=None, MixedInstancesPolicy=None, InstanceId=None, MinSize=None, MaxSize=None, DesiredCapacity=None, DefaultCooldown=None, AvailabilityZones=None, LoadBalancerNames=None, TargetGroupARNs=None, HealthCheckType=None, HealthCheckGracePeriod=None, PlacementGroup=None, VPCZoneIdentifier=None, TerminationPolicies=None, NewInstancesProtectedFromScaleIn=None, LifecycleHookSpecificationList=None, Tags=None, ServiceLinkedRoleARN=None, MaxInstanceLifetime=None):
    """
    Creates an Auto Scaling group with the specified name and attributes.
    If you exceed your maximum limit of Auto Scaling groups, the call fails. To query this limit, call the DescribeAccountLimits API. For information about updating this limit, see Amazon EC2 Auto Scaling Service Quotas in the Amazon EC2 Auto Scaling User Guide .
    For introductory exercises for creating an Auto Scaling group, see Getting Started with Amazon EC2 Auto Scaling and Tutorial: Set Up a Scaled and Load-Balanced Application in the Amazon EC2 Auto Scaling User Guide . For more information, see Auto Scaling Groups in the Amazon EC2 Auto Scaling User Guide .
    See also: AWS API Documentation
    Exceptions
    Examples
    This example creates an Auto Scaling group.
    Expected Output:
    This example creates an Auto Scaling group and attaches the specified Classic Load Balancer.
    Expected Output:
    This example creates an Auto Scaling group and attaches the specified target group.
    Expected Output:
    :example: response = client.create_auto_scaling_group(
    AutoScalingGroupName='string',
    LaunchConfigurationName='string',
    LaunchTemplate={
    'LaunchTemplateId': 'string',
    'LaunchTemplateName': 'string',
    'Version': 'string'
    },
    MixedInstancesPolicy={
    'LaunchTemplate': {
    'LaunchTemplateSpecification': {
    'LaunchTemplateId': 'string',
    'LaunchTemplateName': 'string',
    'Version': 'string'
    },
    'Overrides': [
    {
    'InstanceType': 'string',
    'WeightedCapacity': 'string'
    },
    ]
    },
    'InstancesDistribution': {
    'OnDemandAllocationStrategy': 'string',
    'OnDemandBaseCapacity': 123,
    'OnDemandPercentageAboveBaseCapacity': 123,
    'SpotAllocationStrategy': 'string',
    'SpotInstancePools': 123,
    'SpotMaxPrice': 'string'
    }
    },
    InstanceId='string',
    MinSize=123,
    MaxSize=123,
    DesiredCapacity=123,
    DefaultCooldown=123,
    AvailabilityZones=[
    'string',
    ],
    LoadBalancerNames=[
    'string',
    ],
    TargetGroupARNs=[
    'string',
    ],
    HealthCheckType='string',
    HealthCheckGracePeriod=123,
    PlacementGroup='string',
    VPCZoneIdentifier='string',
    TerminationPolicies=[
    'string',
    ],
    NewInstancesProtectedFromScaleIn=True|False,
    LifecycleHookSpecificationList=[
    {
    'LifecycleHookName': 'string',
    'LifecycleTransition': 'string',
    'NotificationMetadata': 'string',
    'HeartbeatTimeout': 123,
    'DefaultResult': 'string',
    'NotificationTargetARN': 'string',
    'RoleARN': 'string'
    },
    ],
    Tags=[
    {
    'ResourceId': 'string',
    'ResourceType': 'string',
    'Key': 'string',
    'Value': 'string',
    'PropagateAtLaunch': True|False
    },
    ],
    ServiceLinkedRoleARN='string',
    MaxInstanceLifetime=123
    )
    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED]\nThe name of the Auto Scaling group. This name must be unique per Region per account.\n
    :type LaunchConfigurationName: string
    :param LaunchConfigurationName: The name of the launch configuration to use when an instance is launched. To get the launch configuration name, use the DescribeLaunchConfigurations API operation. New launch configurations can be created with the CreateLaunchConfiguration API.\nYou must specify one of the following parameters in your request: LaunchConfigurationName , LaunchTemplate , InstanceId , or MixedInstancesPolicy .\n
    :type LaunchTemplate: dict
    :param LaunchTemplate: Parameters used to specify the launch template and version to use when an instance is launched.\nFor more information, see LaunchTemplateSpecification in the Amazon EC2 Auto Scaling API Reference .\nYou can alternatively associate a launch template to the Auto Scaling group by using the MixedInstancesPolicy parameter.\nYou must specify one of the following parameters in your request: LaunchConfigurationName , LaunchTemplate , InstanceId , or MixedInstancesPolicy .\n\nLaunchTemplateId (string) --The ID of the launch template. To get the template ID, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.\nYou must specify either a template ID or a template name.\n\nLaunchTemplateName (string) --The name of the launch template. To get the template name, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.\nYou must specify either a template ID or a template name.\n\nVersion (string) --The version number, $Latest , or $Default . To get the version number, use the Amazon EC2 DescribeLaunchTemplateVersions API operation. New launch template versions can be created using the Amazon EC2 CreateLaunchTemplateVersion API.\nIf the value is $Latest , Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default , Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default .\n\n\n
    :type MixedInstancesPolicy: dict
    :param MixedInstancesPolicy: An embedded object that specifies a mixed instances policy. The required parameters must be specified. If optional parameters are unspecified, their default values are used.\nThe policy includes parameters that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity, but also the parameters that specify the instance configuration information\xe2\x80\x94the launch template and instance types.\nFor more information, see MixedInstancesPolicy in the Amazon EC2 Auto Scaling API Reference and Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide .\nYou must specify one of the following parameters in your request: LaunchConfigurationName , LaunchTemplate , InstanceId , or MixedInstancesPolicy .\n\nLaunchTemplate (dict) --The launch template and instance types (overrides).\nThis parameter must be specified when creating a mixed instances policy.\n\nLaunchTemplateSpecification (dict) --The launch template to use. You must specify either the launch template ID or launch template name in the request.\n\nLaunchTemplateId (string) --The ID of the launch template. To get the template ID, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.\nYou must specify either a template ID or a template name.\n\nLaunchTemplateName (string) --The name of the launch template. To get the template name, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.\nYou must specify either a template ID or a template name.\n\nVersion (string) --The version number, $Latest , or $Default . To get the version number, use the Amazon EC2 DescribeLaunchTemplateVersions API operation. 
    New launch template versions can be created using the Amazon EC2 CreateLaunchTemplateVersion API.\nIf the value is $Latest , Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default , Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default .\n\n\n\nOverrides (list) --Any parameters that you specify override the same parameters in the launch template. Currently, the only supported override is instance type. You can specify between 1 and 20 instance types.\nIf not provided, Amazon EC2 Auto Scaling will use the instance type specified in the launch template to launch instances.\n\n(dict) --Describes an override for a launch template. Currently, the only supported override is instance type.\nThe maximum number of instance type overrides that can be associated with an Auto Scaling group is 20.\n\nInstanceType (string) --The instance type. You must use an instance type that is supported in your requested Region and Availability Zones.\nFor information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.\n\nWeightedCapacity (string) --The number of capacity units, which gives the instance type a proportional weight to other instance types. For example, larger instance types are generally weighted more than smaller instance types. These are the same units that you chose to set the desired capacity in terms of instances, or a performance attribute such as vCPUs, memory, or I/O.\nFor more information, see Instance Weighting for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide .\nValid Range: Minimum value of 1. 
    Maximum value of 999.\n\n\n\n\n\n\n\nInstancesDistribution (dict) --The instances distribution to use.\nIf you leave this parameter unspecified, the value for each parameter in InstancesDistribution uses a default value.\n\nOnDemandAllocationStrategy (string) --Indicates how to allocate instance types to fulfill On-Demand capacity.\nThe only valid value is prioritized , which is also the default value. This strategy uses the order of instance type overrides for the LaunchTemplate to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling groups launches the remaining capacity using the second priority instance type, and so on.\n\nOnDemandBaseCapacity (integer) --The minimum amount of the Auto Scaling group\'s capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.\nDefault if not set is 0. If you leave it set to 0, On-Demand Instances are launched as a percentage of the Auto Scaling group\'s desired capacity, per the OnDemandPercentageAboveBaseCapacity setting.\n\nNote\nAn update to this setting means a gradual replacement of instances to maintain the specified number of On-Demand Instances for your base capacity. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones.\n\n\nOnDemandPercentageAboveBaseCapacity (integer) --Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity .\nDefault if not set is 100. If you leave it set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances.\n\nNote\nAn update to this setting means a gradual replacement of instances to maintain the percentage of On-Demand Instances for your additional capacity above the base capacity. 
    When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones.\n\nValid Range: Minimum value of 0. Maximum value of 100.\n\nSpotAllocationStrategy (string) --Indicates how to allocate instances across Spot Instance pools.\nIf the allocation strategy is lowest-price , the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. If the allocation strategy is capacity-optimized , the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity.\nThe default Spot allocation strategy for calls that you make through the API, the AWS CLI, or the AWS SDKs is lowest-price . The default Spot allocation strategy for the AWS Management Console is capacity-optimized .\nValid values: lowest-price | capacity-optimized\n\nSpotInstancePools (integer) --The number of Spot Instance pools across which to allocate your Spot Instances. The Spot pools are determined from the different instance types in the Overrides array of LaunchTemplate . Default if not set is 2.\nUsed only when the Spot allocation strategy is lowest-price .\nValid Range: Minimum value of 1. Maximum value of 20.\n\nSpotMaxPrice (string) --The maximum price per unit hour that you are willing to pay for a Spot Instance. If you leave the value of this parameter blank (which is the default), the maximum Spot price is set at the On-Demand price.\nTo remove a value that you previously set, include the parameter but leave the value blank.\n\n\n\n\n
    :type InstanceId: string
    :param InstanceId: The ID of the instance used to create a launch configuration for the group. To get the instance ID, use the Amazon EC2 DescribeInstances API operation.\nWhen you specify an ID of an instance, Amazon EC2 Auto Scaling creates a new launch configuration and associates it with the group. This launch configuration derives its attributes from the specified instance, except for the block device mapping.\nYou must specify one of the following parameters in your request: LaunchConfigurationName , LaunchTemplate , InstanceId , or MixedInstancesPolicy .\n
    :type MinSize: integer
    :param MinSize: [REQUIRED]\nThe minimum size of the group.\n
    :type MaxSize: integer
    :param MaxSize: [REQUIRED]\nThe maximum size of the group.\n\nNote\nWith a mixed instances policy that uses instance weighting, Amazon EC2 Auto Scaling may need to go above MaxSize to meet your capacity requirements. In this event, Amazon EC2 Auto Scaling will never go above MaxSize by more than your maximum instance weight (weights that define how many capacity units each instance contributes to the capacity of the group).\n\n
    :type DesiredCapacity: integer
    :param DesiredCapacity: The desired capacity is the initial capacity of the Auto Scaling group at the time of its creation and the capacity it attempts to maintain. It can scale beyond this capacity if you configure automatic scaling.\nThis number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group. If you do not specify a desired capacity, the default is the minimum size of the group.\n
    :type DefaultCooldown: integer
    :param DefaultCooldown: The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default value is 300 .\nFor more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide .\n
    :type AvailabilityZones: list
    :param AvailabilityZones: One or more Availability Zones for the group. This parameter is optional if you specify one or more subnets for VPCZoneIdentifier .\nConditional: If your account supports EC2-Classic and VPC, this parameter is required to launch instances into EC2-Classic.\n\n(string) --\n\n
    :type LoadBalancerNames: list
    :param LoadBalancerNames: A list of Classic Load Balancers associated with this Auto Scaling group. For Application Load Balancers and Network Load Balancers, specify a list of target groups using the TargetGroupARNs property instead.\nFor more information, see Using a Load Balancer with an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide .\n\n(string) --\n\n
    :type TargetGroupARNs: list
    :param TargetGroupARNs: The Amazon Resource Names (ARN) of the target groups to associate with the Auto Scaling group. Instances are registered as targets in a target group, and traffic is routed to the target group.\nFor more information, see Using a Load Balancer with an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide .\n\n(string) --\n\n
    :type HealthCheckType: string
    :param HealthCheckType: The service to use for the health checks. The valid values are EC2 and ELB . The default value is EC2 . If you configure an Auto Scaling group to use ELB health checks, it considers the instance unhealthy if it fails either the EC2 status checks or the load balancer health checks.\nFor more information, see Health Checks for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide .\n
    :type HealthCheckGracePeriod: integer
    :param HealthCheckGracePeriod: The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. During this time, any health check failures for the instance are ignored. The default value is 0 .\nFor more information, see Health Check Grace Period in the Amazon EC2 Auto Scaling User Guide .\nConditional: This parameter is required if you are adding an ELB health check.\n
    :type PlacementGroup: string
    :param PlacementGroup: The name of the placement group into which to launch your instances, if any. A placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a placement group. For more information, see Placement Groups in the Amazon EC2 User Guide for Linux Instances .
    :type VPCZoneIdentifier: string
    :param VPCZoneIdentifier: A comma-separated list of subnet IDs for your virtual private cloud (VPC).\nIf you specify VPCZoneIdentifier with AvailabilityZones , the subnets that you specify for this parameter must reside in those Availability Zones.\nConditional: If your account supports EC2-Classic and VPC, this parameter is required to launch instances into a VPC.\n
    :type TerminationPolicies: list
    :param TerminationPolicies: One or more termination policies used to select the instance to terminate. These policies are executed in the order that they are listed.\nFor more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Amazon EC2 Auto Scaling User Guide .\n\n(string) --\n\n
    :type NewInstancesProtectedFromScaleIn: boolean
    :param NewInstancesProtectedFromScaleIn: Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in.\nFor more information about preventing instances from terminating on scale in, see Instance Protection in the Amazon EC2 Auto Scaling User Guide .\n
    :type LifecycleHookSpecificationList: list
    :param LifecycleHookSpecificationList: One or more lifecycle hooks.\n\n(dict) --Describes information used to specify a lifecycle hook for an Auto Scaling group.\nA lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).\nThis step is a part of the procedure for creating a lifecycle hook for an Auto Scaling group:\n\n(Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.\n(Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.\nCreate the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.\nIf you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.\nIf you finish before the timeout period ends, complete the lifecycle action.\n\nFor more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide .\n\nLifecycleHookName (string) -- [REQUIRED]The name of the lifecycle hook.\n\nLifecycleTransition (string) -- [REQUIRED]The state of the EC2 instance to which you want to attach the lifecycle hook. The valid values are:\n\nautoscaling:EC2_INSTANCE_LAUNCHING\nautoscaling:EC2_INSTANCE_TERMINATING\n\n\nNotificationMetadata (string) --Additional information that you want to include any time Amazon EC2 Auto Scaling sends a message to the notification target.\n\nHeartbeatTimeout (integer) --The maximum time, in seconds, that can elapse before the lifecycle hook times out.\nIf the lifecycle hook times out, Amazon EC2 Auto Scaling performs the action that you specified in the DefaultResult parameter. 
    You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat .\n\nDefaultResult (string) --Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. The valid values are CONTINUE and ABANDON . The default value is ABANDON .\n\nNotificationTargetARN (string) --The ARN of the target that Amazon EC2 Auto Scaling sends notifications to when an instance is in the transition state for the lifecycle hook. The notification target can be either an SQS queue or an SNS topic.\n\nRoleARN (string) --The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target, for example, an Amazon SNS topic or an Amazon SQS queue.\n\n\n\n\n
    :type Tags: list
    :param Tags: One or more tags. You can tag your Auto Scaling group and propagate the tags to the Amazon EC2 instances it launches.\nTags are not propagated to Amazon EBS volumes. To add tags to Amazon EBS volumes, specify the tags in a launch template but use caution. If the launch template specifies an instance tag with a key that is also specified for the Auto Scaling group, Amazon EC2 Auto Scaling overrides the value of that instance tag with the value specified by the Auto Scaling group.\nFor more information, see Tagging Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide .\n\n(dict) --Describes a tag for an Auto Scaling group.\n\nResourceId (string) --The name of the group.\n\nResourceType (string) --The type of resource. The only supported value is auto-scaling-group .\n\nKey (string) -- [REQUIRED]The tag key.\n\nValue (string) --The tag value.\n\nPropagateAtLaunch (boolean) --Determines whether the tag is added to new instances as they are launched in the group.\n\n\n\n\n
    :type ServiceLinkedRoleARN: string
    :param ServiceLinkedRoleARN: The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf. By default, Amazon EC2 Auto Scaling uses a service-linked role named AWSServiceRoleForAutoScaling, which it creates if it does not exist. For more information, see Service-Linked Roles in the Amazon EC2 Auto Scaling User Guide .
    :type MaxInstanceLifetime: integer
    :param MaxInstanceLifetime: The maximum amount of time, in seconds, that an instance can be in service. The default is null.\nThis parameter is optional, but if you specify a value for it, you must specify a value of at least 604,800 seconds (7 days). To clear a previously set value, specify a new value of 0.\nFor more information, see Replacing Auto Scaling Instances Based on Maximum Instance Lifetime in the Amazon EC2 Auto Scaling User Guide .\nValid Range: Minimum value of 0.\n
    :return: response = client.create_auto_scaling_group(
    AutoScalingGroupName='my-auto-scaling-group',
    LaunchConfigurationName='my-launch-config',
    MaxSize=3,
    MinSize=1,
    VPCZoneIdentifier='subnet-4176792c',
    )
    print(response)
    :returns:
    AutoScaling.Client.exceptions.AlreadyExistsFault
    AutoScaling.Client.exceptions.LimitExceededFault
    AutoScaling.Client.exceptions.ResourceContentionFault
    AutoScaling.Client.exceptions.ServiceLinkedRoleFailure
    """
    # NOTE(review): documentation stub only — the body is intentionally empty.
    # The signature and docstring mirror the AutoScaling client's
    # create_auto_scaling_group operation; presumably the real call is made by
    # the SDK client at runtime — confirm against how this module is consumed.
    pass
def create_launch_configuration(LaunchConfigurationName=None, ImageId=None, KeyName=None, SecurityGroups=None, ClassicLinkVPCId=None, ClassicLinkVPCSecurityGroups=None, UserData=None, InstanceId=None, InstanceType=None, KernelId=None, RamdiskId=None, BlockDeviceMappings=None, InstanceMonitoring=None, SpotPrice=None, IamInstanceProfile=None, EbsOptimized=None, AssociatePublicIpAddress=None, PlacementTenancy=None):
"""
Creates a launch configuration.
If you exceed your maximum limit of launch configurations, the call fails. To query this limit, call the DescribeAccountLimits API. For information about updating this limit, see Amazon EC2 Auto Scaling Service Quotas in the Amazon EC2 Auto Scaling User Guide .
For more information, see Launch Configurations in the Amazon EC2 Auto Scaling User Guide .
See also: AWS API Documentation
Exceptions
Examples
This example creates a launch configuration.
Expected Output:
:example: response = client.create_launch_configuration(
LaunchConfigurationName='string',
ImageId='string',
KeyName='string',
SecurityGroups=[
'string',
],
ClassicLinkVPCId='string',
ClassicLinkVPCSecurityGroups=[
'string',
],
UserData='string',
InstanceId='string',
InstanceType='string',
KernelId='string',
RamdiskId='string',
BlockDeviceMappings=[
{
'VirtualName': 'string',
'DeviceName': 'string',
'Ebs': {
'SnapshotId': 'string',
'VolumeSize': 123,
'VolumeType': 'string',
'DeleteOnTermination': True|False,
'Iops': 123,
'Encrypted': True|False
},
'NoDevice': True|False
},
],
InstanceMonitoring={
'Enabled': True|False
},
SpotPrice='string',
IamInstanceProfile='string',
EbsOptimized=True|False,
AssociatePublicIpAddress=True|False,
PlacementTenancy='string'
)
:type LaunchConfigurationName: string
:param LaunchConfigurationName: [REQUIRED]\nThe name of the launch configuration. This name must be unique per Region per account.\n
:type ImageId: string
:param ImageId: The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see Finding an AMI in the Amazon EC2 User Guide for Linux Instances .\nIf you do not specify InstanceId , you must specify ImageId .\n
:type KeyName: string
:param KeyName: The name of the key pair. For more information, see Amazon EC2 Key Pairs in the Amazon EC2 User Guide for Linux Instances .
:type SecurityGroups: list
:param SecurityGroups: A list that contains the security groups to assign to the instances in the Auto Scaling group.\n[EC2-VPC] Specify the security group IDs. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide .\n[EC2-Classic] Specify either the security group names or the security group IDs. For more information, see Amazon EC2 Security Groups in the Amazon EC2 User Guide for Linux Instances .\n\n(string) --\n\n
:type ClassicLinkVPCId: string
:param ClassicLinkVPCId: The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide .\nThis parameter can only be used if you are launching EC2-Classic instances.\n
:type ClassicLinkVPCSecurityGroups: list
:param ClassicLinkVPCSecurityGroups: The IDs of one or more security groups for the specified ClassicLink-enabled VPC. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide .\nIf you specify the ClassicLinkVPCId parameter, you must specify this parameter.\n\n(string) --\n\n
:type UserData: string
:param UserData: The Base64-encoded user data to make available to the launched EC2 instances. For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances .\n\nThis value will be base64 encoded automatically. Do not base64 encode this value prior to performing the operation.\n
:type InstanceId: string
:param InstanceId: The ID of the instance to use to create the launch configuration. The new launch configuration derives attributes from the instance, except for the block device mapping.\nTo create a launch configuration with a block device mapping or override any other instance attributes, specify them as part of the same request.\nFor more information, see Create a Launch Configuration Using an EC2 Instance in the Amazon EC2 Auto Scaling User Guide .\nIf you do not specify InstanceId , you must specify both ImageId and InstanceType .\n
:type InstanceType: string
:param InstanceType: Specifies the instance type of the EC2 instance.\nFor information about available instance types, see Available Instance Types in the Amazon EC2 User Guide for Linux Instances.\nIf you do not specify InstanceId , you must specify InstanceType .\n
:type KernelId: string
:param KernelId: The ID of the kernel associated with the AMI.
:type RamdiskId: string
:param RamdiskId: The ID of the RAM disk to select.
:type BlockDeviceMappings: list
:param BlockDeviceMappings: A block device mapping, which specifies the block devices for the instance. You can specify virtual devices and EBS volumes. For more information, see Block Device Mapping in the Amazon EC2 User Guide for Linux Instances .\n\n(dict) --Describes a block device mapping.\n\nVirtualName (string) --The name of the virtual device (for example, ephemeral0 ).\nYou can specify either VirtualName or Ebs , but not both.\n\nDeviceName (string) -- [REQUIRED]The device name exposed to the EC2 instance (for example, /dev/sdh or xvdh ). For more information, see Device Naming on Linux Instances in the Amazon EC2 User Guide for Linux Instances .\n\nEbs (dict) --Parameters used to automatically set up EBS volumes when an instance is launched.\nYou can specify either VirtualName or Ebs , but not both.\n\nSnapshotId (string) --The snapshot ID of the volume to use.\nConditional: This parameter is optional if you specify a volume size. If you specify both SnapshotId and VolumeSize , VolumeSize must be equal or greater than the size of the snapshot.\n\nVolumeSize (integer) --The volume size, in Gibibytes (GiB).\nThis can be a number from 1-1,024 for standard , 4-16,384 for io1 , 1-16,384 for gp2 , and 500-16,384 for st1 and sc1 . If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.\nDefault: If you create a volume from a snapshot and you don\'t specify a volume size, the default is the snapshot size.\n\nNote\nAt least one of VolumeSize or SnapshotId is required.\n\n\nVolumeType (string) --The volume type, which can be standard for Magnetic, io1 for Provisioned IOPS SSD, gp2 for General Purpose SSD, st1 for Throughput Optimized HDD, or sc1 for Cold HDD. For more information, see Amazon EBS Volume Types in the Amazon EC2 User Guide for Linux Instances .\nValid Values: standard | io1 | gp2 | st1 | sc1\n\nDeleteOnTermination (boolean) --Indicates whether the volume is deleted on instance termination. 
For Amazon EC2 Auto Scaling, the default value is true .\n\nIops (integer) --The number of I/O operations per second (IOPS) to provision for the volume. The maximum ratio of IOPS to volume size (in GiB) is 50:1. For more information, see Amazon EBS Volume Types in the Amazon EC2 User Guide for Linux Instances .\nConditional: This parameter is required when the volume type is io1 . (Not used with standard , gp2 , st1 , or sc1 volumes.)\n\nEncrypted (boolean) --Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types . If your AMI uses encrypted volumes, you can also only launch it on supported instance types.\n\nNote\nIf you are creating a volume from a snapshot, you cannot specify an encryption value. Volumes that are created from encrypted snapshots are automatically encrypted, and volumes that are created from unencrypted snapshots are automatically unencrypted. By default, encrypted snapshots use the AWS managed CMK that is used for EBS encryption, but you can specify a custom CMK when you create the snapshot. The ability to encrypt a snapshot during copying also allows you to apply a new CMK to an already-encrypted snapshot. Volumes restored from the resulting copy are only accessible using the new CMK.\nEnabling encryption by default results in all EBS volumes being encrypted with the AWS managed CMK or a customer managed CMK, whether or not the snapshot was encrypted.\n\nFor more information, see Using Encryption with EBS-Backed AMIs in the Amazon EC2 User Guide for Linux Instances and Required CMK Key Policy for Use with Encrypted Volumes in the Amazon EC2 Auto Scaling User Guide .\n\n\n\nNoDevice (boolean) --Setting this value to true suppresses the specified device included in the block device mapping of the AMI.\nIf NoDevice is true for the root device, instances might fail the EC2 health check. 
In that case, Amazon EC2 Auto Scaling launches replacement instances.\nIf you specify NoDevice , you cannot specify Ebs .\n\n\n\n\n
:type InstanceMonitoring: dict
:param InstanceMonitoring: Controls whether instances in this group are launched with detailed (true ) or basic (false ) monitoring.\nThe default value is true (enabled).\n\nWarning\nWhen detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide .\n\n\nEnabled (boolean) --If true , detailed monitoring is enabled. Otherwise, basic monitoring is enabled.\n\n\n
:type SpotPrice: string
:param SpotPrice: The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price. For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide .\n\nNote\nWhen you change your maximum price by creating a new launch configuration, running instances will continue to run as long as the maximum price for those running instances is higher than the current Spot price.\n\n
:type IamInstanceProfile: string
:param IamInstanceProfile: The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role.\nFor more information, see IAM Role for Applications That Run on Amazon EC2 Instances in the Amazon EC2 Auto Scaling User Guide .\n
:type EbsOptimized: boolean
:param EbsOptimized: Specifies whether the launch configuration is optimized for EBS I/O (true ) or not (false ). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances .\nThe default value is false .\n
:type AssociatePublicIpAddress: boolean
:param AssociatePublicIpAddress: For Auto Scaling groups that are running in a virtual private cloud (VPC), specifies whether to assign a public IP address to the group\'s instances. If you specify true , each instance in the Auto Scaling group receives a unique public IP address. For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide .\nIf you specify this parameter, you must specify at least one subnet for VPCZoneIdentifier when you create your group.\n\nNote\nIf the instance is launched into a default subnet, the default is to assign a public IP address, unless you disabled the option to assign a public IP address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IP address, unless you enabled the option to assign a public IP address on the subnet.\n\n
:type PlacementTenancy: string
:param PlacementTenancy: The tenancy of the instance. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.\nTo launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default ), you must set the value of this parameter to dedicated .\nIf you specify PlacementTenancy , you must specify at least one subnet for VPCZoneIdentifier when you create your group.\nFor more information, see Instance Placement Tenancy in the Amazon EC2 Auto Scaling User Guide .\nValid Values: default | dedicated\n
:return: response = client.create_launch_configuration(
IamInstanceProfile='my-iam-role',
ImageId='ami-12345678',
InstanceType='m3.medium',
LaunchConfigurationName='my-launch-config',
SecurityGroups=[
'sg-eb2af88e',
],
)
print(response)
:returns:
AutoScaling.Client.exceptions.AlreadyExistsFault
AutoScaling.Client.exceptions.LimitExceededFault
AutoScaling.Client.exceptions.ResourceContentionFault
"""
pass
def create_or_update_tags(Tags: list = None):
    """
    Creates or updates tags for the specified Auto Scaling group.
    When you specify a tag with a key that already exists, the operation overwrites the previous tag definition, and you do not get an error message.
    For more information, see Tagging Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide .
    See also: AWS API Documentation
    Exceptions
    Examples
    This example adds two tags to the specified Auto Scaling group.
    Expected Output:
    :example: response = client.create_or_update_tags(
    Tags=[
    {
    'ResourceId': 'string',
    'ResourceType': 'string',
    'Key': 'string',
    'Value': 'string',
    'PropagateAtLaunch': True|False
    },
    ]
    )
    :type Tags: list
    :param Tags: [REQUIRED]\nOne or more tags.\n\n(dict) --Describes a tag for an Auto Scaling group.\n\nResourceId (string) --The name of the group.\n\nResourceType (string) --The type of resource. The only supported value is auto-scaling-group .\n\nKey (string) -- [REQUIRED]The tag key.\n\nValue (string) --The tag value.\n\nPropagateAtLaunch (boolean) --Determines whether the tag is added to new instances as they are launched in the group.\n\n\n\n\n
    :return: response = client.create_or_update_tags(
    Tags=[
    {
    'Key': 'Role',
    'PropagateAtLaunch': True,
    'ResourceId': 'my-auto-scaling-group',
    'ResourceType': 'auto-scaling-group',
    'Value': 'WebServer',
    },
    {
    'Key': 'Dept',
    'PropagateAtLaunch': True,
    'ResourceId': 'my-auto-scaling-group',
    'ResourceType': 'auto-scaling-group',
    'Value': 'Research',
    },
    ],
    )
    print(response)
    """
    # Documentation stub only; performs no action. The real call is made by the
    # generated botocore client at runtime — presumably; confirm against boto3.
    pass
def delete_auto_scaling_group(AutoScalingGroupName: str = None, ForceDelete: bool = None):
    """
    Deletes the specified Auto Scaling group.
    If the group has instances or scaling activities in progress, you must specify the option to force the deletion in order for it to succeed.
    If the group has policies, deleting the group deletes the policies, the underlying alarm actions, and any alarm that no longer has an associated action.
    To remove instances from the Auto Scaling group before deleting it, call the DetachInstances API with the list of instances and the option to decrement the desired capacity. This ensures that Amazon EC2 Auto Scaling does not launch replacement instances.
    To terminate all instances before deleting the Auto Scaling group, call the UpdateAutoScalingGroup API and set the minimum size and desired capacity of the Auto Scaling group to zero.
    See also: AWS API Documentation
    Exceptions
    Examples
    This example deletes the specified Auto Scaling group.
    Expected Output:
    This example deletes the specified Auto Scaling group and all its instances.
    Expected Output:
    :example: response = client.delete_auto_scaling_group(
    AutoScalingGroupName='string',
    ForceDelete=True|False
    )
    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED]\nThe name of the Auto Scaling group.\n
    :type ForceDelete: boolean
    :param ForceDelete: Specifies that the group is to be deleted along with all instances associated with the group, without waiting for all instances to be terminated. This parameter also deletes any lifecycle actions associated with the group.
    :return: response = client.delete_auto_scaling_group(
    AutoScalingGroupName='my-auto-scaling-group',
    )
    print(response)
    :returns:
    AutoScaling.Client.exceptions.ScalingActivityInProgressFault
    AutoScaling.Client.exceptions.ResourceInUseFault
    AutoScaling.Client.exceptions.ResourceContentionFault
    """
    # Documentation stub only; performs no action at runtime.
    pass
def delete_launch_configuration(LaunchConfigurationName: str = None):
    """
    Deletes the specified launch configuration.
    The launch configuration must not be attached to an Auto Scaling group. When this call completes, the launch configuration is no longer available for use.
    See also: AWS API Documentation
    Exceptions
    Examples
    This example deletes the specified launch configuration.
    Expected Output:
    :example: response = client.delete_launch_configuration(
    LaunchConfigurationName='string'
    )
    :type LaunchConfigurationName: string
    :param LaunchConfigurationName: [REQUIRED]\nThe name of the launch configuration.\n
    :return: response = client.delete_launch_configuration(
    LaunchConfigurationName='my-launch-config',
    )
    print(response)
    """
    # Documentation stub only; performs no action at runtime.
    pass
def delete_lifecycle_hook(LifecycleHookName: str = None, AutoScalingGroupName: str = None):
    """
    Deletes the specified lifecycle hook.
    If there are any outstanding lifecycle actions, they are completed first (ABANDON for launching instances, CONTINUE for terminating instances).
    See also: AWS API Documentation
    Exceptions
    Examples
    This example deletes the specified lifecycle hook.
    Expected Output:
    :example: response = client.delete_lifecycle_hook(
    LifecycleHookName='string',
    AutoScalingGroupName='string'
    )
    :type LifecycleHookName: string
    :param LifecycleHookName: [REQUIRED]\nThe name of the lifecycle hook.\n
    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED]\nThe name of the Auto Scaling group.\n
    :rtype: dict
    ReturnsResponse Syntax
    {}
    Response Structure
    (dict) --
    Exceptions
    AutoScaling.Client.exceptions.ResourceContentionFault
    Examples
    This example deletes the specified lifecycle hook.
    response = client.delete_lifecycle_hook(
    AutoScalingGroupName='my-auto-scaling-group',
    LifecycleHookName='my-lifecycle-hook',
    )
    print(response)
    Expected Output:
    {
    'ResponseMetadata': {
    '...': '...',
    },
    }
    :return: {}
    :returns:
    (dict) --
    """
    # Documentation stub only; performs no action at runtime.
    pass
def delete_notification_configuration(AutoScalingGroupName: str = None, TopicARN: str = None):
    """
    Deletes the specified notification.
    See also: AWS API Documentation
    Exceptions
    Examples
    This example deletes the specified notification from the specified Auto Scaling group.
    Expected Output:
    :example: response = client.delete_notification_configuration(
    AutoScalingGroupName='string',
    TopicARN='string'
    )
    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED]\nThe name of the Auto Scaling group.\n
    :type TopicARN: string
    :param TopicARN: [REQUIRED]\nThe Amazon Resource Name (ARN) of the Amazon Simple Notification Service (Amazon SNS) topic.\n
    :return: response = client.delete_notification_configuration(
    AutoScalingGroupName='my-auto-scaling-group',
    TopicARN='arn:aws:sns:us-west-2:123456789012:my-sns-topic',
    )
    print(response)
    :returns:
    AutoScaling.Client.exceptions.ResourceContentionFault
    """
    # Documentation stub only; performs no action at runtime.
    pass
def delete_policy(AutoScalingGroupName: str = None, PolicyName: str = None):
    """
    Deletes the specified scaling policy.
    Deleting either a step scaling policy or a simple scaling policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action.
    For more information, see Deleting a Scaling Policy in the Amazon EC2 Auto Scaling User Guide .
    See also: AWS API Documentation
    Exceptions
    Examples
    This example deletes the specified Auto Scaling policy.
    Expected Output:
    :example: response = client.delete_policy(
    AutoScalingGroupName='string',
    PolicyName='string'
    )
    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: The name of the Auto Scaling group.
    :type PolicyName: string
    :param PolicyName: [REQUIRED]\nThe name or Amazon Resource Name (ARN) of the policy.\n
    :return: response = client.delete_policy(
    AutoScalingGroupName='my-auto-scaling-group',
    PolicyName='ScaleIn',
    )
    print(response)
    :returns:
    AutoScaling.Client.exceptions.ResourceContentionFault
    AutoScaling.Client.exceptions.ServiceLinkedRoleFailure
    """
    # Documentation stub only; performs no action at runtime.
    pass
def delete_scheduled_action(AutoScalingGroupName: str = None, ScheduledActionName: str = None):
    """
    Deletes the specified scheduled action.
    See also: AWS API Documentation
    Exceptions
    Examples
    This example deletes the specified scheduled action from the specified Auto Scaling group.
    Expected Output:
    :example: response = client.delete_scheduled_action(
    AutoScalingGroupName='string',
    ScheduledActionName='string'
    )
    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED]\nThe name of the Auto Scaling group.\n
    :type ScheduledActionName: string
    :param ScheduledActionName: [REQUIRED]\nThe name of the action to delete.\n
    :return: response = client.delete_scheduled_action(
    AutoScalingGroupName='my-auto-scaling-group',
    ScheduledActionName='my-scheduled-action',
    )
    print(response)
    :returns:
    AutoScaling.Client.exceptions.ResourceContentionFault
    """
    # Documentation stub only; performs no action at runtime.
    pass
def delete_tags(Tags: list = None):
    """
    Deletes the specified tags.
    See also: AWS API Documentation
    Exceptions
    Examples
    This example deletes the specified tag from the specified Auto Scaling group.
    Expected Output:
    :example: response = client.delete_tags(
    Tags=[
    {
    'ResourceId': 'string',
    'ResourceType': 'string',
    'Key': 'string',
    'Value': 'string',
    'PropagateAtLaunch': True|False
    },
    ]
    )
    :type Tags: list
    :param Tags: [REQUIRED]\nOne or more tags.\n\n(dict) --Describes a tag for an Auto Scaling group.\n\nResourceId (string) --The name of the group.\n\nResourceType (string) --The type of resource. The only supported value is auto-scaling-group .\n\nKey (string) -- [REQUIRED]The tag key.\n\nValue (string) --The tag value.\n\nPropagateAtLaunch (boolean) --Determines whether the tag is added to new instances as they are launched in the group.\n\n\n\n\n
    :return: response = client.delete_tags(
    Tags=[
    {
    'Key': 'Dept',
    'ResourceId': 'my-auto-scaling-group',
    'ResourceType': 'auto-scaling-group',
    'Value': 'Research',
    },
    ],
    )
    print(response)
    """
    # Documentation stub only; performs no action at runtime.
    pass
def describe_account_limits():
    """
    Describes the current Amazon EC2 Auto Scaling resource quotas for your AWS account.
    For information about requesting an increase, see Amazon EC2 Auto Scaling Service Quotas in the Amazon EC2 Auto Scaling User Guide .
    See also: AWS API Documentation
    Exceptions
    Examples
    This example describes the Auto Scaling limits for your AWS account.
    Expected Output:
    :example: response = client.describe_account_limits()
    :rtype: dict
    ReturnsResponse Syntax{
    'MaxNumberOfAutoScalingGroups': 123,
    'MaxNumberOfLaunchConfigurations': 123,
    'NumberOfAutoScalingGroups': 123,
    'NumberOfLaunchConfigurations': 123
    }
    Response Structure
    (dict) --
    MaxNumberOfAutoScalingGroups (integer) --The maximum number of groups allowed for your AWS account. The default is 200 groups per AWS Region.
    MaxNumberOfLaunchConfigurations (integer) --The maximum number of launch configurations allowed for your AWS account. The default is 200 launch configurations per AWS Region.
    NumberOfAutoScalingGroups (integer) --The current number of groups for your AWS account.
    NumberOfLaunchConfigurations (integer) --The current number of launch configurations for your AWS account.
    Exceptions
    AutoScaling.Client.exceptions.ResourceContentionFault
    Examples
    This example describes the Auto Scaling limits for your AWS account.
    response = client.describe_account_limits(
    )
    print(response)
    Expected Output:
    {
    'MaxNumberOfAutoScalingGroups': 20,
    'MaxNumberOfLaunchConfigurations': 100,
    'NumberOfAutoScalingGroups': 3,
    'NumberOfLaunchConfigurations': 5,
    'ResponseMetadata': {
    '...': '...',
    },
    }
    :return: {
    'MaxNumberOfAutoScalingGroups': 123,
    'MaxNumberOfLaunchConfigurations': 123,
    'NumberOfAutoScalingGroups': 123,
    'NumberOfLaunchConfigurations': 123
    }
    """
    # Documentation stub only; performs no action at runtime.
    pass
def describe_adjustment_types():
    """
    Describes the available adjustment types for Amazon EC2 Auto Scaling scaling policies. These settings apply to step scaling policies and simple scaling policies; they do not apply to target tracking scaling policies.
    The following adjustment types are supported:
    See also: AWS API Documentation
    Exceptions
    Examples
    This example describes the available adjustment types.
    Expected Output:
    :example: response = client.describe_adjustment_types()
    :rtype: dict
    ReturnsResponse Syntax{
    'AdjustmentTypes': [
    {
    'AdjustmentType': 'string'
    },
    ]
    }
    Response Structure
    (dict) --
    AdjustmentTypes (list) --The policy adjustment types.
    (dict) --Describes a policy adjustment type.
    AdjustmentType (string) --The policy adjustment type. The valid values are ChangeInCapacity , ExactCapacity , and PercentChangeInCapacity .
    Exceptions
    AutoScaling.Client.exceptions.ResourceContentionFault
    Examples
    This example describes the available adjustment types.
    response = client.describe_adjustment_types(
    )
    print(response)
    Expected Output:
    {
    'AdjustmentTypes': [
    {
    'AdjustmentType': 'ChangeInCapacity',
    },
    {
    'AdjustmentType': 'ExactCapacity',
    },
    {
    'AdjustmentType': 'PercentChangeInCapacity',
    },
    ],
    'ResponseMetadata': {
    '...': '...',
    },
    }
    :return: {
    'AdjustmentTypes': [
    {
    'AdjustmentType': 'string'
    },
    ]
    }
    :returns:
    AutoScaling.Client.exceptions.ResourceContentionFault
    """
    # Fixed: the example output previously said 'ExactCapcity', contradicting
    # the valid values documented above ('ExactCapacity').
    # Documentation stub only; performs no action at runtime.
    pass
def describe_auto_scaling_groups(AutoScalingGroupNames=None, NextToken=None, MaxRecords=None):
"""
Describes one or more Auto Scaling groups.
See also: AWS API Documentation
Exceptions
Examples
This example describes the specified Auto Scaling group.
Expected Output:
:example: response = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[
'string',
],
NextToken='string',
MaxRecords=123
)
:type AutoScalingGroupNames: list
:param AutoScalingGroupNames: The names of the Auto Scaling groups. Each name can be a maximum of 1600 characters. By default, you can only specify up to 50 names. You can optionally increase this limit using the MaxRecords parameter.\nIf you omit this parameter, all Auto Scaling groups are described.\n\n(string) --\n\n
:type NextToken: string
:param NextToken: The token for the next set of items to return. (You received this token from a previous call.)
:type MaxRecords: integer
:param MaxRecords: The maximum number of items to return with this call. The default value is 50 and the maximum value is 100 .
:rtype: dict
ReturnsResponse Syntax
{
'AutoScalingGroups': [
{
'AutoScalingGroupName': 'string',
'AutoScalingGroupARN': 'string',
'LaunchConfigurationName': 'string',
'LaunchTemplate': {
'LaunchTemplateId': 'string',
'LaunchTemplateName': 'string',
'Version': 'string'
},
'MixedInstancesPolicy': {
'LaunchTemplate': {
'LaunchTemplateSpecification': {
'LaunchTemplateId': 'string',
'LaunchTemplateName': 'string',
'Version': 'string'
},
'Overrides': [
{
'InstanceType': 'string',
'WeightedCapacity': 'string'
},
]
},
'InstancesDistribution': {
'OnDemandAllocationStrategy': 'string',
'OnDemandBaseCapacity': 123,
'OnDemandPercentageAboveBaseCapacity': 123,
'SpotAllocationStrategy': 'string',
'SpotInstancePools': 123,
'SpotMaxPrice': 'string'
}
},
'MinSize': 123,
'MaxSize': 123,
'DesiredCapacity': 123,
'DefaultCooldown': 123,
'AvailabilityZones': [
'string',
],
'LoadBalancerNames': [
'string',
],
'TargetGroupARNs': [
'string',
],
'HealthCheckType': 'string',
'HealthCheckGracePeriod': 123,
'Instances': [
{
'InstanceId': 'string',
'InstanceType': 'string',
'AvailabilityZone': 'string',
'LifecycleState': 'Pending'|'Pending:Wait'|'Pending:Proceed'|'Quarantined'|'InService'|'Terminating'|'Terminating:Wait'|'Terminating:Proceed'|'Terminated'|'Detaching'|'Detached'|'EnteringStandby'|'Standby',
'HealthStatus': 'string',
'LaunchConfigurationName': 'string',
'LaunchTemplate': {
'LaunchTemplateId': 'string',
'LaunchTemplateName': 'string',
'Version': 'string'
},
'ProtectedFromScaleIn': True|False,
'WeightedCapacity': 'string'
},
],
'CreatedTime': datetime(2015, 1, 1),
'SuspendedProcesses': [
{
'ProcessName': 'string',
'SuspensionReason': 'string'
},
],
'PlacementGroup': 'string',
'VPCZoneIdentifier': 'string',
'EnabledMetrics': [
{
'Metric': 'string',
'Granularity': 'string'
},
],
'Status': 'string',
'Tags': [
{
'ResourceId': 'string',
'ResourceType': 'string',
'Key': 'string',
'Value': 'string',
'PropagateAtLaunch': True|False
},
],
'TerminationPolicies': [
'string',
],
'NewInstancesProtectedFromScaleIn': True|False,
'ServiceLinkedRoleARN': 'string',
'MaxInstanceLifetime': 123
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
AutoScalingGroups (list) --
The groups.
(dict) --
Describes an Auto Scaling group.
AutoScalingGroupName (string) --
The name of the Auto Scaling group.
AutoScalingGroupARN (string) --
The Amazon Resource Name (ARN) of the Auto Scaling group.
LaunchConfigurationName (string) --
The name of the associated launch configuration.
LaunchTemplate (dict) --
The launch template for the group.
LaunchTemplateId (string) --
The ID of the launch template. To get the template ID, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.
You must specify either a template ID or a template name.
LaunchTemplateName (string) --
The name of the launch template. To get the template name, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.
You must specify either a template ID or a template name.
Version (string) --
The version number, $Latest , or $Default . To get the version number, use the Amazon EC2 DescribeLaunchTemplateVersions API operation. New launch template versions can be created using the Amazon EC2 CreateLaunchTemplateVersion API.
If the value is $Latest , Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default , Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default .
MixedInstancesPolicy (dict) --
The mixed instances policy for the group.
LaunchTemplate (dict) --
The launch template and instance types (overrides).
This parameter must be specified when creating a mixed instances policy.
LaunchTemplateSpecification (dict) --
The launch template to use. You must specify either the launch template ID or launch template name in the request.
LaunchTemplateId (string) --
The ID of the launch template. To get the template ID, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.
You must specify either a template ID or a template name.
LaunchTemplateName (string) --
The name of the launch template. To get the template name, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.
You must specify either a template ID or a template name.
Version (string) --
The version number, $Latest , or $Default . To get the version number, use the Amazon EC2 DescribeLaunchTemplateVersions API operation. New launch template versions can be created using the Amazon EC2 CreateLaunchTemplateVersion API.
If the value is $Latest , Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default , Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default .
Overrides (list) --
Any parameters that you specify override the same parameters in the launch template. Currently, the only supported override is instance type. You can specify between 1 and 20 instance types.
If not provided, Amazon EC2 Auto Scaling will use the instance type specified in the launch template to launch instances.
(dict) --
Describes an override for a launch template. Currently, the only supported override is instance type.
The maximum number of instance type overrides that can be associated with an Auto Scaling group is 20.
InstanceType (string) --
The instance type. You must use an instance type that is supported in your requested Region and Availability Zones.
For information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.
WeightedCapacity (string) --
The number of capacity units, which gives the instance type a proportional weight to other instance types. For example, larger instance types are generally weighted more than smaller instance types. These are the same units that you chose to set the desired capacity in terms of instances, or a performance attribute such as vCPUs, memory, or I/O.
For more information, see Instance Weighting for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide .
Valid Range: Minimum value of 1. Maximum value of 999.
InstancesDistribution (dict) --
The instances distribution to use.
If you leave this parameter unspecified, the value for each parameter in InstancesDistribution uses a default value.
OnDemandAllocationStrategy (string) --
Indicates how to allocate instance types to fulfill On-Demand capacity.
The only valid value is prioritized , which is also the default value. This strategy uses the order of instance type overrides for the LaunchTemplate to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling groups launches the remaining capacity using the second priority instance type, and so on.
OnDemandBaseCapacity (integer) --
The minimum amount of the Auto Scaling group\'s capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.
Default if not set is 0. If you leave it set to 0, On-Demand Instances are launched as a percentage of the Auto Scaling group\'s desired capacity, per the OnDemandPercentageAboveBaseCapacity setting.
Note
An update to this setting means a gradual replacement of instances to maintain the specified number of On-Demand Instances for your base capacity. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones.
OnDemandPercentageAboveBaseCapacity (integer) --
Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity .
Default if not set is 100. If you leave it set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances.
Note
An update to this setting means a gradual replacement of instances to maintain the percentage of On-Demand Instances for your additional capacity above the base capacity. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones.
Valid Range: Minimum value of 0. Maximum value of 100.
SpotAllocationStrategy (string) --
Indicates how to allocate instances across Spot Instance pools.
If the allocation strategy is lowest-price , the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. If the allocation strategy is capacity-optimized , the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity.
The default Spot allocation strategy for calls that you make through the API, the AWS CLI, or the AWS SDKs is lowest-price . The default Spot allocation strategy for the AWS Management Console is capacity-optimized .
Valid values: lowest-price | capacity-optimized
SpotInstancePools (integer) --
The number of Spot Instance pools across which to allocate your Spot Instances. The Spot pools are determined from the different instance types in the Overrides array of LaunchTemplate . Default if not set is 2.
Used only when the Spot allocation strategy is lowest-price .
Valid Range: Minimum value of 1. Maximum value of 20.
SpotMaxPrice (string) --
The maximum price per unit hour that you are willing to pay for a Spot Instance. If you leave the value of this parameter blank (which is the default), the maximum Spot price is set at the On-Demand price.
To remove a value that you previously set, include the parameter but leave the value blank.
MinSize (integer) --
The minimum size of the group.
MaxSize (integer) --
The maximum size of the group.
DesiredCapacity (integer) --
The desired size of the group.
DefaultCooldown (integer) --
The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
AvailabilityZones (list) --
One or more Availability Zones for the group.
(string) --
LoadBalancerNames (list) --
One or more load balancers associated with the group.
(string) --
TargetGroupARNs (list) --
The Amazon Resource Names (ARN) of the target groups for your load balancer.
(string) --
HealthCheckType (string) --
The service to use for the health checks. The valid values are EC2 and ELB . If you configure an Auto Scaling group to use ELB health checks, it considers the instance unhealthy if it fails either the EC2 status checks or the load balancer health checks.
HealthCheckGracePeriod (integer) --
The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service.
Instances (list) --
The EC2 instances associated with the group.
(dict) --
Describes an EC2 instance.
InstanceId (string) --
The ID of the instance.
InstanceType (string) --
The instance type of the EC2 instance.
AvailabilityZone (string) --
The Availability Zone in which the instance is running.
LifecycleState (string) --
A description of the current lifecycle state. The Quarantined state is not used.
HealthStatus (string) --
The last reported health status of the instance. "Healthy" means that the instance is healthy and should remain in service. "Unhealthy" means that the instance is unhealthy and that Amazon EC2 Auto Scaling should terminate and replace it.
LaunchConfigurationName (string) --
The launch configuration associated with the instance.
LaunchTemplate (dict) --
The launch template for the instance.
LaunchTemplateId (string) --
The ID of the launch template. To get the template ID, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.
You must specify either a template ID or a template name.
LaunchTemplateName (string) --
The name of the launch template. To get the template name, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.
You must specify either a template ID or a template name.
Version (string) --
The version number, $Latest , or $Default . To get the version number, use the Amazon EC2 DescribeLaunchTemplateVersions API operation. New launch template versions can be created using the Amazon EC2 CreateLaunchTemplateVersion API.
If the value is $Latest , Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default , Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default .
ProtectedFromScaleIn (boolean) --
Indicates whether the instance is protected from termination by Amazon EC2 Auto Scaling when scaling in.
WeightedCapacity (string) --
The number of capacity units contributed by the instance based on its instance type.
Valid Range: Minimum value of 1. Maximum value of 999.
CreatedTime (datetime) --
The date and time the group was created.
SuspendedProcesses (list) --
The suspended processes associated with the group.
(dict) --
Describes an automatic scaling process that has been suspended.
For more information, see Scaling Processes in the Amazon EC2 Auto Scaling User Guide .
ProcessName (string) --
The name of the suspended process.
SuspensionReason (string) --
The reason that the process was suspended.
PlacementGroup (string) --
The name of the placement group into which to launch your instances, if any.
VPCZoneIdentifier (string) --
One or more subnet IDs, if applicable, separated by commas.
EnabledMetrics (list) --
The metrics enabled for the group.
(dict) --
Describes an enabled metric.
Metric (string) --
One of the following metrics:
GroupMinSize
GroupMaxSize
GroupDesiredCapacity
GroupInServiceInstances
GroupPendingInstances
GroupStandbyInstances
GroupTerminatingInstances
GroupTotalInstances
GroupInServiceCapacity
GroupPendingCapacity
GroupStandbyCapacity
GroupTerminatingCapacity
GroupTotalCapacity
Granularity (string) --
The granularity of the metric. The only valid value is 1Minute .
Status (string) --
The current state of the group when the DeleteAutoScalingGroup operation is in progress.
Tags (list) --
The tags for the group.
(dict) --
Describes a tag for an Auto Scaling group.
ResourceId (string) --
The name of the group.
ResourceType (string) --
The type of resource. The only supported value is auto-scaling-group .
Key (string) --
The tag key.
Value (string) --
The tag value.
PropagateAtLaunch (boolean) --
Determines whether the tag is added to new instances as they are launched in the group.
TerminationPolicies (list) --
The termination policies for the group.
(string) --
NewInstancesProtectedFromScaleIn (boolean) --
Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in.
ServiceLinkedRoleARN (string) --
The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf.
MaxInstanceLifetime (integer) --
The maximum amount of time, in seconds, that an instance can be in service.
Valid Range: Minimum value of 0.
NextToken (string) --
A string that indicates that the response contains more items than can be returned in a single response. To receive additional items, specify this string for the NextToken value when requesting the next set of items. This value is null when there are no more items to return.
Exceptions
AutoScaling.Client.exceptions.InvalidNextToken
AutoScaling.Client.exceptions.ResourceContentionFault
Examples
This example describes the specified Auto Scaling group.
response = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[
'my-auto-scaling-group',
],
)
print(response)
Expected Output:
{
'AutoScalingGroups': [
{
'AutoScalingGroupARN': 'arn:aws:autoscaling:us-west-2:123456789012:autoScalingGroup:930d940e-891e-4781-a11a-7b0acd480f03:autoScalingGroupName/my-auto-scaling-group',
'AutoScalingGroupName': 'my-auto-scaling-group',
'AvailabilityZones': [
'us-west-2c',
],
'CreatedTime': datetime(2013, 8, 19, 20, 53, 25, 0, 231, 0),
'DefaultCooldown': 300,
'DesiredCapacity': 1,
'EnabledMetrics': [
],
'HealthCheckGracePeriod': 300,
'HealthCheckType': 'EC2',
'Instances': [
{
'AvailabilityZone': 'us-west-2c',
'HealthStatus': 'Healthy',
'InstanceId': 'i-4ba0837f',
'LaunchConfigurationName': 'my-launch-config',
'LifecycleState': 'InService',
'ProtectedFromScaleIn': False,
},
],
'LaunchConfigurationName': 'my-launch-config',
'LoadBalancerNames': [
],
'MaxSize': 1,
'MinSize': 0,
'NewInstancesProtectedFromScaleIn': False,
'SuspendedProcesses': [
],
'Tags': [
],
'TerminationPolicies': [
'Default',
],
'VPCZoneIdentifier': 'subnet-12345678',
},
],
'ResponseMetadata': {
'...': '...',
},
}
:return: {
'AutoScalingGroups': [
{
'AutoScalingGroupName': 'string',
'AutoScalingGroupARN': 'string',
'LaunchConfigurationName': 'string',
'LaunchTemplate': {
'LaunchTemplateId': 'string',
'LaunchTemplateName': 'string',
'Version': 'string'
},
'MixedInstancesPolicy': {
'LaunchTemplate': {
'LaunchTemplateSpecification': {
'LaunchTemplateId': 'string',
'LaunchTemplateName': 'string',
'Version': 'string'
},
'Overrides': [
{
'InstanceType': 'string',
'WeightedCapacity': 'string'
},
]
},
'InstancesDistribution': {
'OnDemandAllocationStrategy': 'string',
'OnDemandBaseCapacity': 123,
'OnDemandPercentageAboveBaseCapacity': 123,
'SpotAllocationStrategy': 'string',
'SpotInstancePools': 123,
'SpotMaxPrice': 'string'
}
},
'MinSize': 123,
'MaxSize': 123,
'DesiredCapacity': 123,
'DefaultCooldown': 123,
'AvailabilityZones': [
'string',
],
'LoadBalancerNames': [
'string',
],
'TargetGroupARNs': [
'string',
],
'HealthCheckType': 'string',
'HealthCheckGracePeriod': 123,
'Instances': [
{
'InstanceId': 'string',
'InstanceType': 'string',
'AvailabilityZone': 'string',
'LifecycleState': 'Pending'|'Pending:Wait'|'Pending:Proceed'|'Quarantined'|'InService'|'Terminating'|'Terminating:Wait'|'Terminating:Proceed'|'Terminated'|'Detaching'|'Detached'|'EnteringStandby'|'Standby',
'HealthStatus': 'string',
'LaunchConfigurationName': 'string',
'LaunchTemplate': {
'LaunchTemplateId': 'string',
'LaunchTemplateName': 'string',
'Version': 'string'
},
'ProtectedFromScaleIn': True|False,
'WeightedCapacity': 'string'
},
],
'CreatedTime': datetime(2015, 1, 1),
'SuspendedProcesses': [
{
'ProcessName': 'string',
'SuspensionReason': 'string'
},
],
'PlacementGroup': 'string',
'VPCZoneIdentifier': 'string',
'EnabledMetrics': [
{
'Metric': 'string',
'Granularity': 'string'
},
],
'Status': 'string',
'Tags': [
{
'ResourceId': 'string',
'ResourceType': 'string',
'Key': 'string',
'Value': 'string',
'PropagateAtLaunch': True|False
},
],
'TerminationPolicies': [
'string',
],
'NewInstancesProtectedFromScaleIn': True|False,
'ServiceLinkedRoleARN': 'string',
'MaxInstanceLifetime': 123
},
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
def describe_auto_scaling_instances(InstanceIds=None, MaxRecords=None, NextToken=None):
    """
    Describe one or more Auto Scaling instances.

    Documentation stub for ``AutoScaling.Client.describe_auto_scaling_instances``;
    see the AWS API Documentation for the authoritative reference.

    :type InstanceIds: list
    :param InstanceIds: The IDs of the instances (up to ``MaxRecords`` IDs).
        If omitted, all Auto Scaling instances are described. IDs that do
        not exist are ignored without error.
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of items to return with this call.
        The default value is 50 and the maximum value is 50.
    :type NextToken: string
    :param NextToken: The pagination token returned by a previous call.
    :rtype: dict
    :return: A dict of the form::

        {
            'AutoScalingInstances': [
                {
                    'InstanceId': 'string',
                    'InstanceType': 'string',
                    'AutoScalingGroupName': 'string',
                    'AvailabilityZone': 'string',
                    'LifecycleState': 'string',
                    'HealthStatus': 'string',
                    'LaunchConfigurationName': 'string',
                    'LaunchTemplate': {
                        'LaunchTemplateId': 'string',
                        'LaunchTemplateName': 'string',
                        'Version': 'string'
                    },
                    'ProtectedFromScaleIn': True|False,
                    'WeightedCapacity': 'string'
                },
            ],
            'NextToken': 'string'
        }

        ``HealthStatus`` is "Healthy" when the instance should remain in
        service and "Unhealthy" when Amazon EC2 Auto Scaling should
        terminate and replace it. ``NextToken`` is null when there are no
        more items to return.

    Raises:
        AutoScaling.Client.exceptions.InvalidNextToken
        AutoScaling.Client.exceptions.ResourceContentionFault
    """
    # Generated stub: the real implementation is provided by botocore at
    # runtime; this body intentionally does nothing and returns None.
    pass
def describe_auto_scaling_notification_types():
    """
    Describe the notification types supported by Amazon EC2 Auto Scaling.

    Documentation stub for
    ``AutoScaling.Client.describe_auto_scaling_notification_types``; see the
    AWS API Documentation for the authoritative reference.

    :rtype: dict
    :return: A dict of the form::

        {
            'AutoScalingNotificationTypes': [
                'string',
            ]
        }

        Known values include 'autoscaling:EC2_INSTANCE_LAUNCH',
        'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
        'autoscaling:EC2_INSTANCE_TERMINATE',
        'autoscaling:EC2_INSTANCE_TERMINATE_ERROR' and
        'autoscaling:TEST_NOTIFICATION'.

    Raises:
        AutoScaling.Client.exceptions.ResourceContentionFault
    """
    # Generated stub: the real implementation is provided by botocore at
    # runtime; this body intentionally does nothing and returns None.
    pass
def describe_launch_configurations(LaunchConfigurationNames=None, NextToken=None, MaxRecords=None):
    """
    Describe one or more launch configurations.

    Documentation stub for ``AutoScaling.Client.describe_launch_configurations``;
    see the AWS API Documentation for the authoritative reference.

    :type LaunchConfigurationNames: list
    :param LaunchConfigurationNames: The launch configuration names. If
        omitted, all launch configurations are described.
    :type NextToken: string
    :param NextToken: The pagination token returned by a previous call.
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of items to return with this call.
        The default value is 50 and the maximum value is 100.
    :rtype: dict
    :return: A dict of the form::

        {
            'LaunchConfigurations': [
                {
                    'LaunchConfigurationName': 'string',
                    'LaunchConfigurationARN': 'string',
                    'ImageId': 'string',
                    'KeyName': 'string',
                    'SecurityGroups': [
                        'string',
                    ],
                    'ClassicLinkVPCId': 'string',
                    'ClassicLinkVPCSecurityGroups': [
                        'string',
                    ],
                    'UserData': 'string',
                    'InstanceType': 'string',
                    'KernelId': 'string',
                    'RamdiskId': 'string',
                    'BlockDeviceMappings': [
                        {
                            'VirtualName': 'string',
                            'DeviceName': 'string',
                            'Ebs': {
                                'SnapshotId': 'string',
                                'VolumeSize': 123,
                                'VolumeType': 'string',
                                'DeleteOnTermination': True|False,
                                'Iops': 123,
                                'Encrypted': True|False
                            },
                            'NoDevice': True|False
                        },
                    ],
                    'InstanceMonitoring': {
                        'Enabled': True|False
                    },
                    'SpotPrice': 'string',
                    'IamInstanceProfile': 'string',
                    'CreatedTime': datetime(2015, 1, 1),
                    'EbsOptimized': True|False,
                    'AssociatePublicIpAddress': True|False,
                    'PlacementTenancy': 'string'
                },
            ],
            'NextToken': 'string'
        }

        Notable field semantics (per the AWS API docs):

        * ``UserData`` is the Base64-encoded user data made available to
          the launched EC2 instances.
        * ``Ebs.VolumeType`` is one of ``standard`` | ``io1`` | ``gp2`` |
          ``st1`` | ``sc1``; ``DeleteOnTermination`` defaults to true for
          Amazon EC2 Auto Scaling; ``Iops`` applies only to ``io1``.
        * ``InstanceMonitoring.Enabled`` selects detailed (true) or basic
          (false) monitoring.
        * ``PlacementTenancy`` is either ``default`` or ``dedicated``.
        * ``NextToken`` is null when there are no more items to return.

    Raises:
        AutoScaling.Client.exceptions.InvalidNextToken
        AutoScaling.Client.exceptions.ResourceContentionFault
    """
    # Generated stub: the real implementation is provided by botocore at
    # runtime; this body intentionally does nothing and returns None.
    pass
def describe_lifecycle_hook_types():
    """
    Describe the available types of lifecycle hooks.

    Documentation stub for ``AutoScaling.Client.describe_lifecycle_hook_types``;
    see the AWS API Documentation for the authoritative reference.

    :rtype: dict
    :return: A dict of the form::

        {
            'LifecycleHookTypes': [
                'string',
            ]
        }

        The supported hook types are
        'autoscaling:EC2_INSTANCE_LAUNCHING' and
        'autoscaling:EC2_INSTANCE_TERMINATING'.

    Raises:
        AutoScaling.Client.exceptions.ResourceContentionFault
    """
    # Generated stub: the real implementation is provided by botocore at
    # runtime; this body intentionally does nothing and returns None.
    pass
def describe_lifecycle_hooks(AutoScalingGroupName=None, LifecycleHookNames=None):
    """
    Describe the lifecycle hooks for the specified Auto Scaling group.

    Documentation stub for ``AutoScaling.Client.describe_lifecycle_hooks``;
    see the AWS API Documentation for the authoritative reference.

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] The name of the Auto Scaling group.
    :type LifecycleHookNames: list
    :param LifecycleHookNames: The names of one or more lifecycle hooks. If
        omitted, all lifecycle hooks are described.
    :rtype: dict
    :return: A dict of the form::

        {
            'LifecycleHooks': [
                {
                    'LifecycleHookName': 'string',
                    'AutoScalingGroupName': 'string',
                    'LifecycleTransition': 'string',
                    'NotificationTargetARN': 'string',
                    'RoleARN': 'string',
                    'NotificationMetadata': 'string',
                    'HeartbeatTimeout': 123,
                    'GlobalTimeout': 123,
                    'DefaultResult': 'string'
                },
            ]
        }

        Notable field semantics (per the AWS API docs):

        * ``LifecycleTransition`` is 'autoscaling:EC2_INSTANCE_LAUNCHING'
          or 'autoscaling:EC2_INSTANCE_TERMINATING'.
        * ``NotificationTargetARN`` may be an SQS queue or an SNS topic.
        * ``GlobalTimeout`` caps the Pending:Wait / Terminating:Wait time
          at 172800 seconds (48 hours) or 100 times ``HeartbeatTimeout``,
          whichever is smaller.
        * ``DefaultResult`` is either 'CONTINUE' or 'ABANDON'.

    Raises:
        AutoScaling.Client.exceptions.ResourceContentionFault
    """
    # Generated stub: the real implementation is provided by botocore at
    # runtime; this body intentionally does nothing and returns None.
    pass
def describe_load_balancer_target_groups(AutoScalingGroupName=None, NextToken=None, MaxRecords=None):
    """
    Describe the target groups for the specified Auto Scaling group.

    Documentation stub for
    ``AutoScaling.Client.describe_load_balancer_target_groups``; see the
    AWS API Documentation for the authoritative reference.

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] The name of the Auto Scaling group.
    :type NextToken: string
    :param NextToken: The pagination token returned by a previous call.
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of items to return with this call.
        The default value is 100 and the maximum value is 100.
    :rtype: dict
    :return: A dict of the form::

        {
            'LoadBalancerTargetGroups': [
                {
                    'LoadBalancerTargetGroupARN': 'string',
                    'State': 'string'
                },
            ],
            'NextToken': 'string'
        }

        ``State`` is one of (per the AWS API docs):

        * 'Adding' - instances are being registered with the target group.
        * 'Added' - all instances are registered with the target group.
        * 'InService' - at least one instance passed an ELB health check.
        * 'Removing' - instances are being deregistered; with connection
          draining, Elastic Load Balancing waits for in-flight requests to
          complete first.
        * 'Removed' - all instances are deregistered from the target group.

        ``NextToken`` is null when there are no more items to return.

    Raises:
        AutoScaling.Client.exceptions.ResourceContentionFault
    """
    # Generated stub: the real implementation is provided by botocore at
    # runtime; this body intentionally does nothing and returns None.
    pass
def describe_load_balancers(AutoScalingGroupName=None, NextToken=None, MaxRecords=None):
    """
    Describe the load balancers attached to the specified Auto Scaling group.

    This operation covers Classic Load Balancers only. For Application Load
    Balancers or Network Load Balancers, use the
    DescribeLoadBalancerTargetGroups API instead.

    See also: AWS API Documentation

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] The name of the Auto Scaling group.
    :type NextToken: string
    :param NextToken: The token for the next set of items to return.
        (You received this token from a previous call.)
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of items to return with this call.
        The default value is 100 and the maximum value is 100.
    :rtype: dict
    :return: {
        'LoadBalancers': [
            {
                'LoadBalancerName': 'string',
                'State': 'string'
            },
        ],
        'NextToken': 'string'
    }

    Each load balancer 'State' is one of the following:
        Adding - The instances in the group are being registered with the
            load balancer.
        Added - All instances in the group are registered with the load
            balancer.
        InService - At least one instance in the group passed an ELB health
            check.
        Removing - The instances in the group are being deregistered from
            the load balancer. If connection draining is enabled, Elastic
            Load Balancing waits for in-flight requests to complete before
            deregistering the instances.
        Removed - All instances in the group are deregistered from the load
            balancer.

    'NextToken' is a string indicating that the response contains more items
    than can be returned in a single response; pass it back as NextToken to
    get the next set of items. It is null when there are no more items.

    Raises:
        AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def describe_metric_collection_types():
    """
    Describe the CloudWatch metrics available for Amazon EC2 Auto Scaling.

    The GroupStandbyInstances metric is not returned by default; it must be
    explicitly requested when calling the EnableMetricsCollection API.

    See also: AWS API Documentation

    :rtype: dict
    :return: {
        'Metrics': [
            {
                'Metric': 'string'
            },
        ],
        'Granularities': [
            {
                'Granularity': 'string'
            },
        ]
    }

    'Metric' is one of: GroupMinSize, GroupMaxSize, GroupDesiredCapacity,
    GroupInServiceInstances, GroupPendingInstances, GroupStandbyInstances,
    GroupTerminatingInstances, GroupTotalInstances.

    'Granularity' has only one valid value: 1Minute.

    Raises:
        AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def describe_notification_configurations(AutoScalingGroupNames=None, NextToken=None, MaxRecords=None):
    """
    Describe the notification actions associated with the specified Auto
    Scaling group.

    See also: AWS API Documentation

    :type AutoScalingGroupNames: list
    :param AutoScalingGroupNames: The name of the Auto Scaling group.
        (string) --
    :type NextToken: string
    :param NextToken: The token for the next set of items to return.
        (You received this token from a previous call.)
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of items to return with this call.
        The default value is 50 and the maximum value is 100.
    :rtype: dict
    :return: {
        'NotificationConfigurations': [
            {
                'AutoScalingGroupName': 'string',
                'TopicARN': 'string',
                'NotificationType': 'string'
            },
        ],
        'NextToken': 'string'
    }

    'TopicARN' is the Amazon Resource Name (ARN) of the Amazon Simple
    Notification Service (Amazon SNS) topic.

    'NotificationType' is one of the following event notification types:
        autoscaling:EC2_INSTANCE_LAUNCH
        autoscaling:EC2_INSTANCE_LAUNCH_ERROR
        autoscaling:EC2_INSTANCE_TERMINATE
        autoscaling:EC2_INSTANCE_TERMINATE_ERROR
        autoscaling:TEST_NOTIFICATION

    'NextToken' indicates more items are available; pass it back to fetch
    the next set. It is null when there are no more items to return.

    Raises:
        AutoScaling.Client.exceptions.InvalidNextToken
        AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def describe_policies(AutoScalingGroupName=None, PolicyNames=None, PolicyTypes=None, NextToken=None, MaxRecords=None):
    """
    Describe the scaling policies for the specified Auto Scaling group.

    See also: AWS API Documentation

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: The name of the Auto Scaling group.
    :type PolicyNames: list
    :param PolicyNames: The names of one or more policies. If omitted, all
        policies are described. If a group name is provided, the results are
        limited to that group. This list is limited to 50 items. Unknown
        policy names are ignored with no error. (string) --
    :type PolicyTypes: list
    :param PolicyTypes: One or more policy types. The valid values are
        SimpleScaling, StepScaling, and TargetTrackingScaling. (string) --
    :type NextToken: string
    :param NextToken: The token for the next set of items to return.
        (You received this token from a previous call.)
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of items to be returned with each
        call. The default value is 50 and the maximum value is 100.
    :rtype: dict
    :return: {
        'ScalingPolicies': [
            {
                'AutoScalingGroupName': 'string',
                'PolicyName': 'string',
                'PolicyARN': 'string',
                'PolicyType': 'string',
                'AdjustmentType': 'string',
                'MinAdjustmentStep': 123,
                'MinAdjustmentMagnitude': 123,
                'ScalingAdjustment': 123,
                'Cooldown': 123,
                'StepAdjustments': [
                    {
                        'MetricIntervalLowerBound': 123.0,
                        'MetricIntervalUpperBound': 123.0,
                        'ScalingAdjustment': 123
                    },
                ],
                'MetricAggregationType': 'string',
                'EstimatedInstanceWarmup': 123,
                'Alarms': [
                    {
                        'AlarmName': 'string',
                        'AlarmARN': 'string'
                    },
                ],
                'TargetTrackingConfiguration': {
                    'PredefinedMetricSpecification': {
                        'PredefinedMetricType': 'ASGAverageCPUUtilization'|'ASGAverageNetworkIn'|'ASGAverageNetworkOut'|'ALBRequestCountPerTarget',
                        'ResourceLabel': 'string'
                    },
                    'CustomizedMetricSpecification': {
                        'MetricName': 'string',
                        'Namespace': 'string',
                        'Dimensions': [
                            {
                                'Name': 'string',
                                'Value': 'string'
                            },
                        ],
                        'Statistic': 'Average'|'Minimum'|'Maximum'|'SampleCount'|'Sum',
                        'Unit': 'string'
                    },
                    'TargetValue': 123.0,
                    'DisableScaleIn': True|False
                },
                'Enabled': True|False
            },
        ],
        'NextToken': 'string'
    }

    Notes on selected response fields:
        PolicyType - SimpleScaling, StepScaling, or TargetTrackingScaling.
        AdjustmentType - How ScalingAdjustment is interpreted:
            ChangeInCapacity, ExactCapacity, or PercentChangeInCapacity.
        MinAdjustmentStep - Available for backward compatibility; use
            MinAdjustmentMagnitude instead.
        MinAdjustmentMagnitude - The minimum number of instances to scale
            when AdjustmentType is PercentChangeInCapacity.
        ScalingAdjustment - The amount by which to scale; positive adds to,
            negative removes from, the current capacity.
        Cooldown - Seconds after a scaling activity completes before further
            dynamic scaling activities can start.
        StepAdjustments - Adjustments that scale based on the size of the
            alarm breach. A null MetricIntervalLowerBound means negative
            infinity and a null MetricIntervalUpperBound means positive
            infinity; ranges cannot overlap or have gaps, and the upper
            bound must be greater than the lower bound. See Step Adjustments
            in the Amazon EC2 Auto Scaling User Guide.
        MetricAggregationType - Minimum, Maximum, or Average.
        EstimatedInstanceWarmup - Estimated seconds until a newly launched
            instance can contribute to the CloudWatch metrics.
        TargetTrackingConfiguration - A target tracking scaling policy;
            specify either a predefined or a customized metric.
            ResourceLabel may only be set when the metric type is
            ALBRequestCountPerTarget and a target group is attached.
            DisableScaleIn defaults to false.
        Enabled - Whether the policy is enabled (true) or disabled (false).
        NextToken - Present when the response contains more items than can
            be returned in a single response; null when there are no more.

    Raises:
        AutoScaling.Client.exceptions.InvalidNextToken
        AutoScaling.Client.exceptions.ResourceContentionFault
        AutoScaling.Client.exceptions.ServiceLinkedRoleFailure
    """
    pass
def describe_scaling_activities(ActivityIds=None, AutoScalingGroupName=None, MaxRecords=None, NextToken=None):
    """
    Describe one or more scaling activities for the specified Auto Scaling
    group.

    See also: AWS API Documentation

    :type ActivityIds: list
    :param ActivityIds: The activity IDs of the desired scaling activities.
        You can specify up to 50 IDs. If omitted, all activities for the
        past six weeks are described. Unknown activities are ignored with no
        error. If you specify an Auto Scaling group, the results are limited
        to that group. (string) --
    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: The name of the Auto Scaling group.
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of items to return with this call.
        The default value is 100 and the maximum value is 100.
    :type NextToken: string
    :param NextToken: The token for the next set of items to return.
        (You received this token from a previous call.)
    :rtype: dict
    :return: {
        'Activities': [
            {
                'ActivityId': 'string',
                'AutoScalingGroupName': 'string',
                'Description': 'string',
                'Cause': 'string',
                'StartTime': datetime(2015, 1, 1),
                'EndTime': datetime(2015, 1, 1),
                'StatusCode': 'PendingSpotBidPlacement'|'WaitingForSpotInstanceRequestId'|'WaitingForSpotInstanceId'|'WaitingForInstanceId'|'PreInService'|'InProgress'|'WaitingForELBConnectionDraining'|'MidLifecycleAction'|'WaitingForInstanceWarmup'|'Successful'|'Failed'|'Cancelled',
                'StatusMessage': 'string',
                'Progress': 123,
                'Details': 'string'
            },
        ],
        'NextToken': 'string'
    }

    Activities are sorted by start time; activities still in progress are
    described first. Each activity is a long-running process representing a
    change to the group, such as changing its size or replacing an instance.
    'Progress' is a value between 0 and 100 indicating the progress of the
    activity. 'NextToken' is null when there are no more items to return.

    Raises:
        AutoScaling.Client.exceptions.InvalidNextToken
        AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def describe_scaling_process_types():
    """
    Describe the scaling process types for use with the ResumeProcesses and
    SuspendProcesses APIs.

    See also: AWS API Documentation

    :rtype: dict
    :return: {
        'Processes': [
            {
                'ProcessName': 'string'
            },
        ]
    }

    'ProcessName' is one of the following processes:
        Launch
        Terminate
        AddToLoadBalancer
        AlarmNotification
        AZRebalance
        HealthCheck
        ReplaceUnhealthy
        ScheduledActions

    For more information, see Scaling Processes in the Amazon EC2 Auto
    Scaling User Guide.

    Raises:
        AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def describe_scheduled_actions(AutoScalingGroupName=None, ScheduledActionNames=None, StartTime=None, EndTime=None, NextToken=None, MaxRecords=None):
    """
    Describe the actions scheduled for your Auto Scaling group that haven\'t
    run or that have not reached their end time. To describe the actions
    that have already run, call the DescribeScalingActivities API.

    See also: AWS API Documentation

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: The name of the Auto Scaling group.
    :type ScheduledActionNames: list
    :param ScheduledActionNames: The names of one or more scheduled actions.
        You can specify up to 50 actions. If omitted, all scheduled actions
        are described. Unknown scheduled actions are ignored with no error.
        (string) --
    :type StartTime: datetime
    :param StartTime: The earliest scheduled start time to return. Ignored
        if scheduled action names are provided.
    :type EndTime: datetime
    :param EndTime: The latest scheduled start time to return. Ignored if
        scheduled action names are provided.
    :type NextToken: string
    :param NextToken: The token for the next set of items to return.
        (You received this token from a previous call.)
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of items to return with this call.
        The default value is 50 and the maximum value is 100.
    :rtype: dict
    :return: {
        'ScheduledUpdateGroupActions': [
            {
                'AutoScalingGroupName': 'string',
                'ScheduledActionName': 'string',
                'ScheduledActionARN': 'string',
                'Time': datetime(2015, 1, 1),
                'StartTime': datetime(2015, 1, 1),
                'EndTime': datetime(2015, 1, 1),
                'Recurrence': 'string',
                'MinSize': 123,
                'MaxSize': 123,
                'DesiredCapacity': 123
            },
        ],
        'NextToken': 'string'
    }

    Notes on selected response fields:
        Time - This parameter is no longer used.
        StartTime - The date and time in UTC for this action to start, for
            example "2019-06-01T00:00:00Z".
        EndTime - The date and time in UTC for the recurring schedule to
            end, for example "2019-06-01T00:00:00Z".
        Recurrence - The recurring schedule for the action, in Unix cron
            syntax format. When StartTime and EndTime are specified with
            Recurrence, they form the boundaries of when the recurring
            action starts and stops.
        DesiredCapacity - The initial capacity of the group after the
            scheduled action runs and the capacity it attempts to maintain.
        NextToken - Null when there are no more items to return.

    Raises:
        AutoScaling.Client.exceptions.InvalidNextToken
        AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def describe_tags(Filters=None, NextToken=None, MaxRecords=None):
    """Describe the tags for Auto Scaling groups (stub for ``DescribeTags``).

    Filters may be used to limit the results, e.g. to the tags of a single
    Auto Scaling group. A tag is included only if it matches all specified
    filters; within one filter it must match at least one of the values.

    See also: AWS API Documentation.

    :type Filters: list
    :param Filters: One or more filters (dicts with ``Name`` and ``Values``
        keys). Valid filter names: ``auto-scaling-group``, ``key``,
        ``value``, ``propagate-at-launch``. Filter values are
        case-sensitive; at most 1000 filters per filter type.
    :type NextToken: string
    :param NextToken: Pagination token from a previous call.
    :type MaxRecords: integer
    :param MaxRecords: Maximum items to return (default 50, maximum 100).
    :rtype: dict
    :return: Response of the form::

        {
            'Tags': [
                {
                    'ResourceId': 'string',
                    'ResourceType': 'string',
                    'Key': 'string',
                    'Value': 'string',
                    'PropagateAtLaunch': True|False
                },
            ],
            'NextToken': 'string'
        }

        ``ResourceType`` is always ``auto-scaling-group``;
        ``PropagateAtLaunch`` indicates whether the tag is added to new
        instances as they launch. ``NextToken`` is present when more items
        remain.

    :raises: AutoScaling.Client.exceptions.InvalidNextToken,
        AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def describe_termination_policy_types():
    """Describe the termination policies supported by Amazon EC2 Auto Scaling
    (stub for ``DescribeTerminationPolicyTypes``).

    See also: AWS API Documentation.

    :rtype: dict
    :return: Response of the form::

        {
            'TerminationPolicyTypes': [
                'string',
            ]
        }

        Supported policy names include ``OldestInstance``,
        ``OldestLaunchConfiguration``, ``NewestInstance``,
        ``ClosestToNextInstanceHour``, ``Default``,
        ``OldestLaunchTemplate`` and ``AllocationStrategy``.

    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def detach_instances(InstanceIds=None, AutoScalingGroupName=None, ShouldDecrementDesiredCapacity=None):
    """Remove one or more instances from an Auto Scaling group (stub for
    ``DetachInstances``).

    Detached instances can then be managed independently of the group.
    Unless the desired capacity is decremented, Amazon EC2 Auto Scaling
    launches replacement instances. Instances are deregistered from any
    attached Classic Load Balancer and target groups.

    See also: AWS API Documentation.

    :type InstanceIds: list
    :param InstanceIds: IDs of the instances (up to 20).
    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] Name of the Auto Scaling group.
    :type ShouldDecrementDesiredCapacity: boolean
    :param ShouldDecrementDesiredCapacity: [REQUIRED] Whether to decrement
        the desired capacity by the number of instances detached.
    :rtype: dict
    :return: Response of the form::

        {
            'Activities': [
                {
                    'ActivityId': 'string',
                    'AutoScalingGroupName': 'string',
                    'Description': 'string',
                    'Cause': 'string',
                    'StartTime': datetime(2015, 1, 1),
                    'EndTime': datetime(2015, 1, 1),
                    'StatusCode': 'PendingSpotBidPlacement'|'WaitingForSpotInstanceRequestId'|'WaitingForSpotInstanceId'|'WaitingForInstanceId'|'PreInService'|'InProgress'|'WaitingForELBConnectionDraining'|'MidLifecycleAction'|'WaitingForInstanceWarmup'|'Successful'|'Failed'|'Cancelled',
                    'StatusMessage': 'string',
                    'Progress': 123,
                    'Details': 'string'
                },
            ]
        }

        Each activity describes the long-running detach operation:
        ``Progress`` is 0-100, ``Cause`` is the reason the activity began.

    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def detach_load_balancer_target_groups(AutoScalingGroupName=None, TargetGroupARNs=None):
    """Detach one or more target groups from an Auto Scaling group (stub for
    ``DetachLoadBalancerTargetGroups``).

    See also: AWS API Documentation.

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] Name of the Auto Scaling group.
    :type TargetGroupARNs: list
    :param TargetGroupARNs: [REQUIRED] Amazon Resource Names (ARN) of the
        target groups (up to 10).
    :rtype: dict
    :return: ``{}`` (empty response structure).
    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def detach_load_balancers(AutoScalingGroupName=None, LoadBalancerNames=None):
    """Detach one or more Classic Load Balancers from an Auto Scaling group
    (stub for ``DetachLoadBalancers``).

    Only Classic Load Balancers are handled here; for Application or
    Network Load Balancers use the DetachLoadBalancerTargetGroups API.
    A detached load balancer enters the ``Removing`` state while its
    instances are deregistered; the instances themselves keep running.

    See also: AWS API Documentation.

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] Name of the Auto Scaling group.
    :type LoadBalancerNames: list
    :param LoadBalancerNames: [REQUIRED] Names of the load balancers
        (up to 10).
    :rtype: dict
    :return: ``{}`` (empty response structure).
    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def disable_metrics_collection(AutoScalingGroupName=None, Metrics=None):
    """Disable group metrics for an Auto Scaling group (stub for
    ``DisableMetricsCollection``).

    See also: AWS API Documentation.

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] Name of the Auto Scaling group.
    :type Metrics: list
    :param Metrics: One or more of: ``GroupMinSize``, ``GroupMaxSize``,
        ``GroupDesiredCapacity``, ``GroupInServiceInstances``,
        ``GroupPendingInstances``, ``GroupStandbyInstances``,
        ``GroupTerminatingInstances``, ``GroupTotalInstances``,
        ``GroupInServiceCapacity``, ``GroupPendingCapacity``,
        ``GroupStandbyCapacity``, ``GroupTerminatingCapacity``,
        ``GroupTotalCapacity``. Omitting this parameter disables all
        metrics.
    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def enable_metrics_collection(AutoScalingGroupName=None, Metrics=None, Granularity=None):
    """Enable group metrics for an Auto Scaling group (stub for
    ``EnableMetricsCollection``).

    See also: AWS API Documentation.

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] Name of the Auto Scaling group.
    :type Metrics: list
    :param Metrics: Group-level metrics to start collecting, one or more
        of: ``GroupMinSize``, ``GroupMaxSize``, ``GroupDesiredCapacity``,
        ``GroupInServiceInstances``, ``GroupPendingInstances``,
        ``GroupStandbyInstances``, ``GroupTerminatingInstances``,
        ``GroupTotalInstances``. The instance weighting feature adds
        ``GroupInServiceCapacity``, ``GroupPendingCapacity``,
        ``GroupStandbyCapacity``, ``GroupTerminatingCapacity``,
        ``GroupTotalCapacity``. Omitting this parameter enables all
        metrics.
    :type Granularity: string
    :param Granularity: [REQUIRED] Granularity of the collected metrics;
        the only valid value is ``1Minute``.
    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def enter_standby(InstanceIds=None, AutoScalingGroupName=None, ShouldDecrementDesiredCapacity=None):
    """Move the specified instances into the standby state (stub for
    ``EnterStandby``).

    With capacity decrement, instances may enter standby as long as the
    resulting desired capacity stays at or above the group's minimum.
    Without decrement, the group launches replacement instances.

    See also: AWS API Documentation.

    :type InstanceIds: list
    :param InstanceIds: IDs of the instances (up to 20).
    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] Name of the Auto Scaling group.
    :type ShouldDecrementDesiredCapacity: boolean
    :param ShouldDecrementDesiredCapacity: [REQUIRED] Whether to decrement
        the desired capacity by the number of instances moved to Standby.
    :rtype: dict
    :return: Response of the form::

        {
            'Activities': [
                {
                    'ActivityId': 'string',
                    'AutoScalingGroupName': 'string',
                    'Description': 'string',
                    'Cause': 'string',
                    'StartTime': datetime(2015, 1, 1),
                    'EndTime': datetime(2015, 1, 1),
                    'StatusCode': 'PendingSpotBidPlacement'|'WaitingForSpotInstanceRequestId'|'WaitingForSpotInstanceId'|'WaitingForInstanceId'|'PreInService'|'InProgress'|'WaitingForELBConnectionDraining'|'MidLifecycleAction'|'WaitingForInstanceWarmup'|'Successful'|'Failed'|'Cancelled',
                    'StatusMessage': 'string',
                    'Progress': 123,
                    'Details': 'string'
                },
            ]
        }

        Each activity describes the long-running move into Standby:
        ``Progress`` is 0-100, ``Cause`` is the reason the activity began.

    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def execute_policy(AutoScalingGroupName=None, PolicyName=None, HonorCooldown=None, MetricValue=None, BreachThreshold=None):
    """Execute the specified scaling policy (stub for ``ExecutePolicy``).

    See also: AWS API Documentation.

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: Name of the Auto Scaling group.
    :type PolicyName: string
    :param PolicyName: [REQUIRED] Name or ARN of the policy.
    :type HonorCooldown: boolean
    :param HonorCooldown: Whether to wait for the cooldown period to
        complete before executing. Not supported for ``StepScaling`` or
        ``TargetTrackingScaling`` policy types.
    :type MetricValue: float
    :param MetricValue: Metric value to compare to ``BreachThreshold``;
        selects which step adjustment of a ``StepScaling`` policy applies.
        Required for ``StepScaling`` policies, not supported otherwise; a
        value that matches no step adjustment yields an error.
    :type BreachThreshold: float
    :param BreachThreshold: Breach threshold for the alarm. Required for
        ``StepScaling`` policies, not supported otherwise.
    :raises: AutoScaling.Client.exceptions.ScalingActivityInProgressFault,
        AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def exit_standby(InstanceIds=None, AutoScalingGroupName=None):
    """Move the specified instances out of the standby state (stub for
    ``ExitStandby``).

    After the instances are back in service, the desired capacity is
    incremented.

    See also: AWS API Documentation.

    :type InstanceIds: list
    :param InstanceIds: IDs of the instances (up to 20).
    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] Name of the Auto Scaling group.
    :rtype: dict
    :return: Response of the form::

        {
            'Activities': [
                {
                    'ActivityId': 'string',
                    'AutoScalingGroupName': 'string',
                    'Description': 'string',
                    'Cause': 'string',
                    'StartTime': datetime(2015, 1, 1),
                    'EndTime': datetime(2015, 1, 1),
                    'StatusCode': 'PendingSpotBidPlacement'|'WaitingForSpotInstanceRequestId'|'WaitingForSpotInstanceId'|'WaitingForInstanceId'|'PreInService'|'InProgress'|'WaitingForELBConnectionDraining'|'MidLifecycleAction'|'WaitingForInstanceWarmup'|'Successful'|'Failed'|'Cancelled',
                    'StatusMessage': 'string',
                    'Progress': 123,
                    'Details': 'string'
                },
            ]
        }

        Each activity describes the long-running move out of Standby:
        ``Progress`` is 0-100, ``Cause`` is the reason the activity began.

    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """Generate a presigned URL for a client method (stub).

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: Number of seconds the presigned URL remains valid;
        defaults to one hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: HTTP method for the generated URL; defaults to the
        method used in the operation's model.
    """
    pass
def get_paginator(operation_name=None):
    """Create a paginator for an operation (stub).

    :type operation_name: string
    :param operation_name: Operation name — the same as the client method
        name. For example, if the method is ``create_foo`` (normally
        invoked as ``client.create_foo(**kwargs)``) and that operation is
        paginable, call ``client.get_paginator('create_foo')``.
    :rtype: L{botocore.paginate.Paginator}
    :return: A paginator object.
    """
    pass
def get_waiter(waiter_name=None):
    """Return an object that can wait for some condition (stub).

    :type waiter_name: str
    :param waiter_name: Name of the waiter to get; see the waiters section
        of the service docs for the available waiters.
    :rtype: botocore.waiter.Waiter
    """
    pass
def put_lifecycle_hook(LifecycleHookName=None, AutoScalingGroupName=None, LifecycleTransition=None, RoleARN=None, NotificationTargetARN=None, NotificationMetadata=None, HeartbeatTimeout=None, DefaultResult=None):
    """Create or update a lifecycle hook for an Auto Scaling group (stub for
    ``PutLifecycleHook``).

    A lifecycle hook tells Amazon EC2 Auto Scaling to act on an instance
    when it launches (before entering service) or terminates (before full
    termination). The call fails if the group's hook limit (50 by default)
    is exceeded. Hooks can be listed with DescribeLifecycleHooks and
    removed with DeleteLifecycleHook.

    See also: AWS API Documentation.

    :type LifecycleHookName: string
    :param LifecycleHookName: [REQUIRED] Name of the lifecycle hook.
    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] Name of the Auto Scaling group.
    :type LifecycleTransition: string
    :param LifecycleTransition: Instance state to attach the hook to:
        ``autoscaling:EC2_INSTANCE_LAUNCHING`` or
        ``autoscaling:EC2_INSTANCE_TERMINATING``. Required for new hooks,
        optional when updating existing hooks.
    :type RoleARN: string
    :param RoleARN: ARN of the IAM role that lets the group publish to the
        notification target (SNS topic or SQS queue). Required for new
        hooks, optional when updating existing hooks.
    :type NotificationTargetARN: string
    :param NotificationTargetARN: ARN of the notification target (SQS queue
        or SNS topic). An empty string overrides the current ARN. A test
        message with the key-value pair
        'Event': 'autoscaling:TEST_NOTIFICATION' is sent when a target is
        specified.
    :type NotificationMetadata: string
    :param NotificationMetadata: Additional information included with every
        message sent to the notification target.
    :type HeartbeatTimeout: integer
    :param HeartbeatTimeout: Seconds before the hook times out (30-7200,
        default 3600). On timeout, the ``DefaultResult`` action is taken;
        RecordLifecycleActionHeartbeat can prevent the timeout.
    :type DefaultResult: string
    :param DefaultResult: Action on timeout or unexpected failure:
        ``CONTINUE`` or ``ABANDON`` (default ``ABANDON``).
    :rtype: dict
    :return: ``{}`` (empty response structure).
    :raises: AutoScaling.Client.exceptions.LimitExceededFault,
        AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def put_notification_configuration(AutoScalingGroupName=None, TopicARN=None, NotificationTypes=None):
    """Configure an Auto Scaling group to send SNS notifications for the given events.

    Subscribers to the specified topic can have messages delivered to an
    endpoint such as a web server or an email address. This configuration
    overwrites any existing notification configuration for the group.

    See also: AWS API Documentation (AutoScaling.Client.put_notification_configuration).

    Example::

        response = client.put_notification_configuration(
            AutoScalingGroupName='my-auto-scaling-group',
            TopicARN='arn:aws:sns:us-west-2:123456789012:my-sns-topic',
            NotificationTypes=['autoscaling:TEST_NOTIFICATION'],
        )

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] The name of the Auto Scaling group.
    :type TopicARN: string
    :param TopicARN: [REQUIRED] The Amazon Resource Name (ARN) of the Amazon
        Simple Notification Service (Amazon SNS) topic.
    :type NotificationTypes: list
    :param NotificationTypes: [REQUIRED] The types of event that cause the
        notification to be sent (list of strings). To query the notification
        types supported by Amazon EC2 Auto Scaling, call the
        DescribeAutoScalingNotificationTypes API.

    :raises: AutoScaling.Client.exceptions.LimitExceededFault
    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    :raises: AutoScaling.Client.exceptions.ServiceLinkedRoleFailure
    """
    pass
def put_scaling_policy(AutoScalingGroupName=None, PolicyName=None, PolicyType=None, AdjustmentType=None, MinAdjustmentStep=None, MinAdjustmentMagnitude=None, ScalingAdjustment=None, Cooldown=None, MetricAggregationType=None, StepAdjustments=None, EstimatedInstanceWarmup=None, TargetTrackingConfiguration=None, Enabled=None):
    """Create or update a scaling policy for an Auto Scaling group.

    For more information about using scaling policies to scale your Auto
    Scaling group, see Target Tracking Scaling Policies and Step and Simple
    Scaling Policies in the Amazon EC2 Auto Scaling User Guide.

    See also: AWS API Documentation (AutoScaling.Client.put_scaling_policy).

    Example::

        response = client.put_scaling_policy(
            AdjustmentType='ChangeInCapacity',
            AutoScalingGroupName='my-auto-scaling-group',
            PolicyName='ScaleIn',
            ScalingAdjustment=-1,
        )

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] The name of the Auto Scaling group.
    :type PolicyName: string
    :param PolicyName: [REQUIRED] The name of the policy.
    :type PolicyType: string
    :param PolicyType: The policy type: ``SimpleScaling``, ``StepScaling``, or
        ``TargetTrackingScaling``. A null value is treated as ``SimpleScaling``.
    :type AdjustmentType: string
    :param AdjustmentType: Whether ``ScalingAdjustment`` is an absolute number
        or a percentage of current capacity: ``ChangeInCapacity``,
        ``ExactCapacity``, or ``PercentChangeInCapacity``. Valid only for
        ``StepScaling`` or ``SimpleScaling`` policies.
    :type MinAdjustmentStep: integer
    :param MinAdjustmentStep: Available for backward compatibility; use
        ``MinAdjustmentMagnitude`` instead.
    :type MinAdjustmentMagnitude: integer
    :param MinAdjustmentMagnitude: The minimum value to scale by when scaling
        by percentages (e.g. with a 25% step policy and a magnitude of 2, a
        4-instance group scales out by 2 instances, not 1). Valid only for
        ``StepScaling`` or ``SimpleScaling`` with ``PercentChangeInCapacity``.
    :type ScalingAdjustment: integer
    :param ScalingAdjustment: The amount by which a simple scaling policy
        scales the group in response to an alarm breach, interpreted per
        ``AdjustmentType``. Positive values add capacity; negative values
        remove it; exact capacity must be positive. Conditional: required
        when ``PolicyType`` is ``SimpleScaling`` (not used otherwise).
    :type Cooldown: integer
    :param Cooldown: Seconds after a scaling activity completes before further
        dynamic scaling can start; defaults to the group's cooldown period.
        Valid only for ``SimpleScaling`` policies.
    :type MetricAggregationType: string
    :param MetricAggregationType: Aggregation type for CloudWatch metrics:
        ``Minimum``, ``Maximum``, or ``Average`` (the default when null).
        Valid only for ``StepScaling`` policies.
    :type StepAdjustments: list
    :param StepAdjustments: A set of adjustments (dicts) that enable scaling
        based on the size of the alarm breach. Each dict contains
        ``MetricIntervalLowerBound`` (float, null = negative infinity),
        ``MetricIntervalUpperBound`` (float, null = positive infinity, must
        exceed the lower bound), and ``ScalingAdjustment`` (integer,
        REQUIRED). Ranges cannot overlap or leave gaps; at most one step may
        have a null lower bound and at most one a null upper bound, and both
        bounds cannot be null in the same step. Conditional: required when
        ``PolicyType`` is ``StepScaling`` (not used otherwise).
    :type EstimatedInstanceWarmup: integer
    :param EstimatedInstanceWarmup: Estimated seconds until a newly launched
        instance can contribute to the CloudWatch metrics; defaults to the
        group's default cooldown. Valid only for ``StepScaling`` or
        ``TargetTrackingScaling`` policies.
    :type TargetTrackingConfiguration: dict
    :param TargetTrackingConfiguration: A target tracking scaling policy with
        predefined or customized metric support. Conditional: required when
        ``PolicyType`` is ``TargetTrackingScaling`` (not used otherwise).
        Keys:

        - ``PredefinedMetricSpecification`` (dict): ``PredefinedMetricType``
          (REQUIRED; one of ``ASGAverageCPUUtilization``,
          ``ASGAverageNetworkIn``, ``ASGAverageNetworkOut``,
          ``ALBRequestCountPerTarget``) and ``ResourceLabel`` (only with
          ``ALBRequestCountPerTarget``; format
          ``app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id``).
        - ``CustomizedMetricSpecification`` (dict): ``MetricName`` (REQUIRED),
          ``Namespace`` (REQUIRED), ``Dimensions`` (list of
          ``{'Name': ..., 'Value': ...}``), ``Statistic`` (REQUIRED), ``Unit``.
        - ``TargetValue`` (float, REQUIRED): the target value for the metric.
        - ``DisableScaleIn`` (boolean): when true, the policy never removes
          instances from the group. Defaults to false.

        Specify either the predefined or the customized metric, not both.
    :type Enabled: boolean
    :param Enabled: Whether the scaling policy is enabled. Default: enabled.

    :rtype: dict
    :return: The output of PutScalingPolicy::

        {
            'PolicyARN': 'string',   # ARN of the policy
            'Alarms': [              # CloudWatch alarms created for a
                {                    # target tracking scaling policy
                    'AlarmName': 'string',
                    'AlarmARN': 'string'
                },
            ]
        }

    :raises: AutoScaling.Client.exceptions.LimitExceededFault
    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    :raises: AutoScaling.Client.exceptions.ServiceLinkedRoleFailure
    """
    pass
def put_scheduled_update_group_action(AutoScalingGroupName=None, ScheduledActionName=None, Time=None, StartTime=None, EndTime=None, Recurrence=None, MinSize=None, MaxSize=None, DesiredCapacity=None):
    """Create or update a scheduled scaling action for an Auto Scaling group.

    If you leave a parameter unspecified when updating a scheduled scaling
    action, the corresponding value remains unchanged. For more information,
    see Scheduled Scaling in the Amazon EC2 Auto Scaling User Guide.

    See also: AWS API Documentation (AutoScaling.Client.put_scheduled_update_group_action).

    Example::

        response = client.put_scheduled_update_group_action(
            AutoScalingGroupName='my-auto-scaling-group',
            ScheduledActionName='my-scheduled-action',
            StartTime=datetime(2014, 5, 12, 8, 0, 0, 0, 132, 0),
            EndTime=datetime(2014, 5, 12, 8, 0, 0, 0, 132, 0),
            MinSize=2,
            MaxSize=6,
            DesiredCapacity=4,
        )

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] The name of the Auto Scaling group.
    :type ScheduledActionName: string
    :param ScheduledActionName: [REQUIRED] The name of this scaling action.
    :type Time: datetime
    :param Time: This parameter is no longer used.
    :type StartTime: datetime
    :param StartTime: When this action starts, in ``YYYY-MM-DDThh:mm:ssZ``
        format in UTC/GMT only and in quotes (e.g. ``'2019-06-01T00:00:00Z'``).
        With ``Recurrence``, the action runs at this time and then on the
        recurrence schedule. Scheduling an action in the past returns an error.
    :type EndTime: datetime
    :param EndTime: When the recurring schedule ends; the action is not
        performed after this time.
    :type Recurrence: string
    :param Recurrence: Unix cron recurrence: five white-space-separated fields
        ``[Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]``,
        quoted (e.g. ``'30 0 1 1,6,12 *'``). See Crontab for the format.
        ``StartTime``/``EndTime`` bound when the recurring action runs.
    :type MinSize: integer
    :param MinSize: The minimum size of the Auto Scaling group.
    :type MaxSize: integer
    :param MaxSize: The maximum size of the Auto Scaling group.
    :type DesiredCapacity: integer
    :param DesiredCapacity: The initial capacity of the group after the action
        runs and the capacity it attempts to maintain; the group can scale
        beyond it if more scaling conditions are added.

    :raises: AutoScaling.Client.exceptions.AlreadyExistsFault
    :raises: AutoScaling.Client.exceptions.LimitExceededFault
    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def record_lifecycle_action_heartbeat(LifecycleHookName=None, AutoScalingGroupName=None, LifecycleActionToken=None, InstanceId=None):
    """Record a heartbeat for the lifecycle action tied to a token or instance.

    This extends the timeout by the length of time defined using the
    PutLifecycleHook API call, and is one step of the procedure for adding a
    lifecycle hook to an Auto Scaling group. For more information, see Auto
    Scaling Lifecycle in the Amazon EC2 Auto Scaling User Guide.

    See also: AWS API Documentation (AutoScaling.Client.record_lifecycle_action_heartbeat).

    Example (keeps the instance in a pending state)::

        response = client.record_lifecycle_action_heartbeat(
            AutoScalingGroupName='my-auto-scaling-group',
            LifecycleActionToken='bcd2f1b8-9a78-44d3-8a7a-4dd07d7cf635',
            LifecycleHookName='my-lifecycle-hook',
        )

    :type LifecycleHookName: string
    :param LifecycleHookName: [REQUIRED] The name of the lifecycle hook.
    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] The name of the Auto Scaling group.
    :type LifecycleActionToken: string
    :param LifecycleActionToken: A token that uniquely identifies a specific
        lifecycle action associated with an instance. Amazon EC2 Auto Scaling
        sends this token to the notification target specified when the
        lifecycle hook was created.
    :type InstanceId: string
    :param InstanceId: The ID of the instance.

    :rtype: dict
    :return: An empty response dict: ``{}``

    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def resume_processes(AutoScalingGroupName=None, ScalingProcesses=None):
    """Resume suspended automatic scaling processes for an Auto Scaling group.

    Resumes the specified suspended processes, or all suspended processes when
    none are given. For more information, see Suspending and Resuming Scaling
    Processes in the Amazon EC2 Auto Scaling User Guide.

    See also: AWS API Documentation (AutoScaling.Client.resume_processes).

    Example::

        response = client.resume_processes(
            AutoScalingGroupName='my-auto-scaling-group',
            ScalingProcesses=['AlarmNotification'],
        )

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] The name of the Auto Scaling group.
    :type ScalingProcesses: list
    :param ScalingProcesses: One or more of the following process names
        (strings); omitting this parameter selects all processes:
        ``Launch``, ``Terminate``, ``HealthCheck``, ``ReplaceUnhealthy``,
        ``AZRebalance``, ``AlarmNotification``, ``ScheduledActions``,
        ``AddToLoadBalancer``.

    :raises: AutoScaling.Client.exceptions.ResourceInUseFault
    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def set_desired_capacity(AutoScalingGroupName=None, DesiredCapacity=None, HonorCooldown=None):
    """Set the size of the specified Auto Scaling group.

    If a scale-in activity occurs because the new ``DesiredCapacity`` is lower
    than the current size of the group, the group uses its termination policy
    to decide which instances to terminate. For more information, see Manual
    Scaling in the Amazon EC2 Auto Scaling User Guide.

    See also: AWS API Documentation (AutoScaling.Client.set_desired_capacity).

    Example::

        response = client.set_desired_capacity(
            AutoScalingGroupName='my-auto-scaling-group',
            DesiredCapacity=2,
            HonorCooldown=True,
        )

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] The name of the Auto Scaling group.
    :type DesiredCapacity: integer
    :param DesiredCapacity: [REQUIRED] The initial capacity of the group after
        this operation completes and the capacity it attempts to maintain.
    :type HonorCooldown: boolean
    :param HonorCooldown: Whether Amazon EC2 Auto Scaling waits for the
        cooldown period to complete before initiating the scaling activity.
        By default, the cooldown period is not honored during manual scaling.

    :raises: AutoScaling.Client.exceptions.ScalingActivityInProgressFault
    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def set_instance_health(InstanceId=None, HealthStatus=None, ShouldRespectGracePeriod=None):
    """Set the health status of the specified instance.

    For more information, see Health Checks for Auto Scaling Instances in the
    Amazon EC2 Auto Scaling User Guide.

    See also: AWS API Documentation (AutoScaling.Client.set_instance_health).

    Example::

        response = client.set_instance_health(
            HealthStatus='Unhealthy',
            InstanceId='i-93633f9b',
        )

    :type InstanceId: string
    :param InstanceId: [REQUIRED] The ID of the instance.
    :type HealthStatus: string
    :param HealthStatus: [REQUIRED] ``Healthy`` keeps the instance in service;
        ``Unhealthy`` takes it out of service, after which Amazon EC2 Auto
        Scaling terminates and replaces it.
    :type ShouldRespectGracePeriod: boolean
    :param ShouldRespectGracePeriod: When the instance's group has a
        ``HealthCheckGracePeriod``, this call respects the grace period by
        default; set to ``False`` to ignore it. See CreateAutoScalingGroup in
        the Amazon EC2 Auto Scaling API Reference for details on the grace
        period.

    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def set_instance_protection(InstanceIds=None, AutoScalingGroupName=None, ProtectedFromScaleIn=None):
    """Update the instance protection settings of the specified instances.

    For more information about preventing instances that are part of an Auto
    Scaling group from terminating on scale in, see Instance Protection in the
    Amazon EC2 Auto Scaling User Guide.

    See also: AWS API Documentation (AutoScaling.Client.set_instance_protection).

    Example (enable protection; pass ``ProtectedFromScaleIn=False`` to disable)::

        response = client.set_instance_protection(
            AutoScalingGroupName='my-auto-scaling-group',
            InstanceIds=['i-93633f9b'],
            ProtectedFromScaleIn=True,
        )

    :type InstanceIds: list
    :param InstanceIds: [REQUIRED] One or more instance IDs (strings).
    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] The name of the Auto Scaling group.
    :type ProtectedFromScaleIn: boolean
    :param ProtectedFromScaleIn: [REQUIRED] Whether the instances are
        protected from termination by Amazon EC2 Auto Scaling when scaling in.

    :rtype: dict
    :return: An empty response dict: ``{}``

    :raises: AutoScaling.Client.exceptions.LimitExceededFault
    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def suspend_processes(AutoScalingGroupName=None, ScalingProcesses=None):
    """Suspend automatic scaling processes for an Auto Scaling group.

    Suspends the specified processes, or all processes when none are given.
    Suspending the ``Launch`` or ``Terminate`` process types can prevent other
    process types from functioning properly; see Suspending and Resuming
    Scaling Processes in the Amazon EC2 Auto Scaling User Guide. To resume
    suspended processes, call the ResumeProcesses API.

    See also: AWS API Documentation (AutoScaling.Client.suspend_processes).

    Example::

        response = client.suspend_processes(
            AutoScalingGroupName='my-auto-scaling-group',
            ScalingProcesses=['AlarmNotification'],
        )

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] The name of the Auto Scaling group.
    :type ScalingProcesses: list
    :param ScalingProcesses: One or more of the following process names
        (strings); omitting this parameter selects all processes:
        ``Launch``, ``Terminate``, ``HealthCheck``, ``ReplaceUnhealthy``,
        ``AZRebalance``, ``AlarmNotification``, ``ScheduledActions``,
        ``AddToLoadBalancer``.

    :raises: AutoScaling.Client.exceptions.ResourceInUseFault
    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def terminate_instance_in_auto_scaling_group(InstanceId=None, ShouldDecrementDesiredCapacity=None):
    """Terminate the specified instance and optionally adjust the group size.

    This call simply makes a termination request; the instance is not
    terminated immediately. When terminated, the instance status changes to
    ``terminated`` and it can no longer be connected to or started. If the
    desired capacity is not decremented, Amazon EC2 Auto Scaling launches a
    replacement instance. By default instances are balanced across all
    Availability Zones; decrementing desired capacity can unbalance the group,
    and the rebalancing that follows might terminate instances in other zones
    (see Rebalancing Activities in the Amazon EC2 Auto Scaling User Guide).

    See also: AWS API Documentation
    (AutoScaling.Client.terminate_instance_in_auto_scaling_group).

    Example (terminate without shrinking the group)::

        response = client.terminate_instance_in_auto_scaling_group(
            InstanceId='i-93633f9b',
            ShouldDecrementDesiredCapacity=False,
        )

    :type InstanceId: string
    :param InstanceId: [REQUIRED] The ID of the instance.
    :type ShouldDecrementDesiredCapacity: boolean
    :param ShouldDecrementDesiredCapacity: [REQUIRED] Whether terminating the
        instance also decrements the size of the Auto Scaling group.

    :rtype: dict
    :return: The resulting scaling activity::

        {
            'Activity': {
                'ActivityId': 'string',            # ID of the activity
                'AutoScalingGroupName': 'string',  # name of the group
                'Description': 'string',           # verbose description
                'Cause': 'string',                 # reason the activity began
                'StartTime': datetime(2015, 1, 1),
                'EndTime': datetime(2015, 1, 1),
                'StatusCode': 'PendingSpotBidPlacement'|'WaitingForSpotInstanceRequestId'|'WaitingForSpotInstanceId'|'WaitingForInstanceId'|'PreInService'|'InProgress'|'WaitingForELBConnectionDraining'|'MidLifecycleAction'|'WaitingForInstanceWarmup'|'Successful'|'Failed'|'Cancelled',
                'StatusMessage': 'string',         # verbose status description
                'Progress': 123,                   # progress, 0-100
                'Details': 'string'                # details about the activity
            }
        }

    :raises: AutoScaling.Client.exceptions.ScalingActivityInProgressFault
    :raises: AutoScaling.Client.exceptions.ResourceContentionFault
    """
    pass
def update_auto_scaling_group(AutoScalingGroupName=None, LaunchConfigurationName=None, LaunchTemplate=None, MixedInstancesPolicy=None, MinSize=None, MaxSize=None, DesiredCapacity=None, DefaultCooldown=None, AvailabilityZones=None, HealthCheckType=None, HealthCheckGracePeriod=None, PlacementGroup=None, VPCZoneIdentifier=None, TerminationPolicies=None, NewInstancesProtectedFromScaleIn=None, ServiceLinkedRoleARN=None, MaxInstanceLifetime=None):
    """Update the configuration for the specified Auto Scaling group.

    Documentation stub for the AutoScaling ``UpdateAutoScalingGroup`` API
    operation. Specify the group name and only the parameters you want to
    change; any parameter left as ``None`` is not modified by the update.
    The new settings take effect for scaling activities that start after
    this call returns. Associating a new launch configuration or template
    affects only newly launched instances; switching the group to a mixed
    instances policy may cause gradual instance replacement to match the
    new purchasing options.

    See also: AWS API Documentation for UpdateAutoScalingGroup.

    :type AutoScalingGroupName: string
    :param AutoScalingGroupName: [REQUIRED] The name of the Auto Scaling group.
    :type LaunchConfigurationName: string
    :param LaunchConfigurationName: Launch configuration name. Mutually
        exclusive with LaunchTemplate and MixedInstancesPolicy.
    :type LaunchTemplate: dict
    :param LaunchTemplate: Launch template and version to use
        (LaunchTemplateId / LaunchTemplateName / Version). Mutually
        exclusive with LaunchConfigurationName and MixedInstancesPolicy.
    :type MixedInstancesPolicy: dict
    :param MixedInstancesPolicy: Embedded mixed instances policy
        (LaunchTemplate with Overrides, plus InstancesDistribution).
        Optional sub-parameters that are not specified are left unchanged.
    :type MinSize: integer
    :param MinSize: The minimum size of the Auto Scaling group.
    :type MaxSize: integer
    :param MaxSize: The maximum size of the Auto Scaling group. With
        instance weighting, Amazon EC2 Auto Scaling may exceed MaxSize by
        at most the maximum instance weight.
    :type DesiredCapacity: integer
    :param DesiredCapacity: Capacity the group attempts to maintain; must
        lie between MinSize and MaxSize.
    :type DefaultCooldown: integer
    :param DefaultCooldown: Seconds after a scaling activity completes
        before another can start. Default 300. Not used for target
        tracking, step scaling, or scheduled scaling policies.
    :type AvailabilityZones: list
    :param AvailabilityZones: One or more Availability Zones (strings).
    :type HealthCheckType: string
    :param HealthCheckType: 'EC2' or 'ELB'. With 'ELB', an instance is
        unhealthy if either the EC2 status checks or the load balancer
        health checks fail.
    :type HealthCheckGracePeriod: integer
    :param HealthCheckGracePeriod: Seconds to wait before health-checking
        a newly in-service instance. Default 0. Required when adding an
        ELB health check.
    :type PlacementGroup: string
    :param PlacementGroup: Placement group name (single-AZ grouping);
        cannot be combined with multiple Availability Zones.
    :type VPCZoneIdentifier: string
    :param VPCZoneIdentifier: Comma-separated subnet IDs. If combined with
        AvailabilityZones, the subnets must reside in those zones.
    :type TerminationPolicies: list
    :param TerminationPolicies: Termination policies (strings), executed
        in listed order, used to select instances to terminate.
    :type NewInstancesProtectedFromScaleIn: boolean
    :param NewInstancesProtectedFromScaleIn: Whether newly launched
        instances are protected from scale-in termination.
    :type ServiceLinkedRoleARN: string
    :param ServiceLinkedRoleARN: ARN of the service-linked role used to
        call other AWS services on your behalf.
    :type MaxInstanceLifetime: integer
    :param MaxInstanceLifetime: Maximum seconds an instance can be in
        service; at least 604800 (7 days) if set, or 0 to clear.

    :return: None (stub — the real client call returns the service response).
    """
    pass
| 41.716698
| 7,326
| 0.706619
| 31,760
| 266,820
| 5.921568
| 0.047985
| 0.037784
| 0.030967
| 0.022545
| 0.832211
| 0.802838
| 0.769627
| 0.743153
| 0.722607
| 0.69785
| 0
| 0.012174
| 0.226033
| 266,820
| 6,395
| 7,327
| 41.723221
| 0.898528
| 0.96817
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
a46923ea53f4c0436157a6281c7c0e2f28c4a684
| 78
|
py
|
Python
|
tdtax/test/test_validate.py
|
sydneyjenkins/timedomain-taxonomy
|
9b1dd7e8fa0fa48648a25ede613ce443fa37a88b
|
[
"MIT"
] | null | null | null |
tdtax/test/test_validate.py
|
sydneyjenkins/timedomain-taxonomy
|
9b1dd7e8fa0fa48648a25ede613ce443fa37a88b
|
[
"MIT"
] | null | null | null |
tdtax/test/test_validate.py
|
sydneyjenkins/timedomain-taxonomy
|
9b1dd7e8fa0fa48648a25ede613ce443fa37a88b
|
[
"MIT"
] | null | null | null |
import tdtax
def test():
    """Smoke test: the bundled taxonomy must validate against its own schema."""
    taxonomy = tdtax.taxonomy
    tdtax.validate(taxonomy, taxonomy)
| 13
| 50
| 0.730769
| 10
| 78
| 5.7
| 0.6
| 0.45614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 78
| 5
| 51
| 15.6
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f1131326ad9a9d570b04646dc34c69e02398e517
| 8,858
|
py
|
Python
|
service/agent/service/agent/__init__.py
|
plvhx/qiscus-programming-test
|
3b6480c41a787ee89770d1375f3a28bbc5b4f453
|
[
"BSD-2-Clause"
] | null | null | null |
service/agent/service/agent/__init__.py
|
plvhx/qiscus-programming-test
|
3b6480c41a787ee89770d1375f3a28bbc5b4f453
|
[
"BSD-2-Clause"
] | null | null | null |
service/agent/service/agent/__init__.py
|
plvhx/qiscus-programming-test
|
3b6480c41a787ee89770d1375f3a28bbc5b4f453
|
[
"BSD-2-Clause"
] | null | null | null |
from service.abstract import Abstract
from service.exception import IncompleteRequiredHeaderException
from service.exception import HeaderValueViolationException
from service.exception import TypeErrorException
class Agent(Abstract):
def hand_over(self, data, headers=None):
    """POST a hand-over (assign agent) request to the Qiscus agent service.

    :param data: request payload forwarded as the POST body.
    :param headers: optional dict; when given it replaces the client's
        current headers before validation.
    :raises IncompleteRequiredHeaderException: if any required header
        (Content-Type, Qiscus-App-Id, Qiscus-Secret-Key, Qiscus-User-Id)
        is missing from the client's headers.
    :raises HeaderValueViolationException: if Content-Type is not
        'application/x-www-form-urlencoded'.
    :return: whatever the underlying client's post() returns.
    """
    url = "/api/v1/agent/service/assign_agent"
    if isinstance(headers, dict):
        self.get_client().set_headers(headers)
    headers = self.get_client().get_headers()
    # One loop replaces four copy-pasted try/except blocks. Bug fix: the
    # original reported "Qiscus-Header-Id" when "Qiscus-User-Id" was
    # missing; the message now names the header that is actually absent.
    for required in (
        "Content-Type",
        "Qiscus-App-Id",
        "Qiscus-Secret-Key",
        "Qiscus-User-Id",
    ):
        try:
            headers[required]
        except KeyError as e:
            raise IncompleteRequiredHeaderException(
                "Missing header '%s' from headers list." % required
            ) from e
    if headers["Content-Type"] != "application/x-www-form-urlencoded":
        raise HeaderValueViolationException(
            "Content-Type value must be 'application/x-www-form-urlencoded'."
        )
    url = self.get_client().get_base_url() + url
    return self.get_client().post(url, data)
def mark_as_resolved(self, data, headers=None):
url = "/api/v1/agent/service/mark_as_resolved"
if isinstance(headers, dict):
self.get_client().set_headers(headers)
headers = self.get_client().get_headers()
try:
headers["Authorization"]
except KeyError as e:
raise IncompleteRequiredHeaderException(
"Missing header 'Authorization' from headers list."
) from e
try:
headers["Content-Type"]
except KeyError as e:
raise IncompleteRequiredHeaderException(
"Missing header 'Content-Type' from headers list."
) from e
try:
headers["Qiscus-App-Id"]
except KeyError as e:
raise IncompleteRequiredHeaderException(
"Missing header 'Qiscus-App-Id' from headers list."
) from e
url = self.get_client().get_base_url() + url
return self.get_client().post(url, data)
def takeover_status(self, headers=None):
url = "/api/v1/app/config/agent_takeover"
if isinstance(headers, dict):
self.get_client().set_headers(headers)
headers = self.get_client().get_headers()
try:
headers["Authorization"]
except KeyError as e:
raise IncompleteRequiredHeaderException(
"Missing header 'Authorization' from headers list."
) from e
try:
headers["Qiscus-App-Id"]
except KeyError as e:
raise IncompleteRequiredHeaderException(
"Missing header 'Qiscus-App-Id' from headers list."
) from e
url = self.get_client().get_base_url() + url
return self.get_client().get(url)
def available_agents(
self,
room_id,
limit=None,
cursor_after=None,
cursor_before=None,
is_available_in_room=False,
headers=None,
):
url = "/api/v2/agent/service/available_agents"
query = []
if isinstance(headers, dict):
self.get_client().set_headers(headers)
headers = self.get_client().get_headers()
try:
headers["Authorization"]
except KeyError as e:
raise IncompleteRequiredHeaderException(
"Missing header 'Authorization' from headers list."
) from e
try:
headers["Qiscus-App-Id"]
except KeyError as e:
raise IncompleteRequiredHeaderException(
"Missing header 'Qiscus-App-Id' from headers list."
) from e
if not isinstance(room_id, int):
raise TypeErrorException("'room_id' must be an integer.")
query.append("room_id=%d" % (room_id))
if isinstance(limit, int):
query.append("limit=%d" % (limit))
if isinstance(cursor_after, int):
query.append("cursor_after=%d" % (cursor_after))
if isinstance(cursor_before, int):
query.append("cursor_before=%d" % (cursor_before))
if isinstance(is_available_in_room, bool):
query.append(
"is_available_in_room=%s"
% ("true" if is_available_in_room == True else "false")
)
url = url + "?" + "&".join(query)
url = self.get_client().get_base_url() + url
return self.get_client().get(url)
def get_total_unserved(self, headers=None):
url = "/api/v2/agent/service/total_unserved"
if isinstance(headers, dict):
self.get_client().set_headers(headers)
headers = self.get_client().get_headers()
try:
headers["Authorization"]
except KeyError as e:
raise IncompleteRequiredHeaderException(
"Missing header 'Authorization' from headers list."
) from e
try:
headers["Qiscus-App-Id"]
except KeyError as e:
raise IncompleteRequiredHeaderException(
"Missing header 'Qiscus-App-Id' from headers list."
) from e
url = self.get_client().get_base_url() + url
return self.get_client().get(url)
def takeover_unserved(self, headers=None):
url = "/api/v2/agent/service/takeover_unserved"
if isinstance(headers, dict):
self.get_client().set_headers(headers)
headers = self.get_client().get_headers()
try:
headers["Authorization"]
except KeyError as e:
raise IncompleteRequiredHeaderException(
"Missing header 'Authorization' from headers list."
) from e
try:
headers["Qiscus-App-Id"]
except KeyError as e:
raise IncompleteRequiredHeaderException(
"Missing header 'Qiscus-App-Id' from headers list."
) from e
url = self.get_client().get_base_url() + url
return self.get_client().post(url)
def add_agent(self, data, headers=None):
url = "/api/v2/agent/service/add_agent"
if isinstance(headers, dict):
self.get_client().set_headers(headers)
headers = self.get_client().get_headers()
try:
headers["Authorization"]
except KeyError as e:
raise IncompleteRequiredHeaderException(
"Missing header 'Authorization' from headers list."
) from e
try:
headers["Qiscus-App-Id"]
except KeyError as e:
raise IncompleteRequiredHeaderException(
"Missing header 'Qiscus-App-Id' from headers list."
) from e
url = self.get_client().get_base_url() + url
return self.get_client().post(url)
def get_other_agents(
self,
cursor_after=None,
cursor_before=None,
room_id=None,
limit=None,
headers=None,
):
url = "/api/v2/agent/service/other_agents"
query = []
if isinstance(headers, dict):
self.get_client().set_headers(headers)
headers = self.get_client().get_headers()
try:
headers["Authorization"]
except KeyError as e:
raise IncompleteRequiredHeaderException(
"Missing header 'Authorization' from headers list."
) from e
try:
headers["Qiscus-App-Id"]
except KeyError as e:
raise IncompleteRequiredHeaderException(
"Missing header 'Qiscus-App-Id' from headers list."
) from e
if isinstance(cursor_after, int):
query.append("cursor_after=%d" % (cursor_after))
if isinstance(cursor_before, int):
query.append("cursor_before=%d" % (cursor_before))
if isinstance(room_id, int):
query.append("room_id=%d" % (room_id))
if isinstance(limit, int):
query.append("limit=%d" % (limit))
url = url + "?" + "&".join(query)
url = self.get_client().get_base_url() + url
return self.get_client().get(url)
| 30.335616
| 81
| 0.579589
| 922
| 8,858
| 5.437093
| 0.095445
| 0.044684
| 0.082984
| 0.063834
| 0.860363
| 0.844205
| 0.830241
| 0.806902
| 0.796529
| 0.749052
| 0
| 0.001333
| 0.322308
| 8,858
| 291
| 82
| 30.439863
| 0.83375
| 0
| 0
| 0.818182
| 0
| 0
| 0.196433
| 0.042335
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0.018182
| 0
| 0.095455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f18b2e4f3cb9cbb2aa3d2516362d0d15bdde6c1b
| 10,445
|
py
|
Python
|
src/asai_conversion.py
|
AlagarPrabu/Tamil_asai
|
ec757ca22dfdb154758f126e3d731628c0f3c1a4
|
[
"MIT"
] | null | null | null |
src/asai_conversion.py
|
AlagarPrabu/Tamil_asai
|
ec757ca22dfdb154758f126e3d731628c0f3c1a4
|
[
"MIT"
] | null | null | null |
src/asai_conversion.py
|
AlagarPrabu/Tamil_asai
|
ec757ca22dfdb154758f126e3d731628c0f3c1a4
|
[
"MIT"
] | null | null | null |
###
# Tamil_Asai
# Copyright 2021 The Author Alagar Prabu
###
import character
# Metrical syllable ("asai") matching rules.  word_convert() first transcribes
# a Tamil word into a K/N/O code string; these prefix patterns are then
# matched longest-first by formula_verify().  Rules 1-4 classify a prefix as
# "NIRAI"; rules 5-8 classify it as "NER".
rule_1 = ["KKOOO", "KNOOO"]
rule_2 = ["KKOO", "KNOO"]
rule_3 = ["KKO", "KNO"]
rule_4 = ["KK", "KN"]
rule_5 = ["KOOO", "NOOO"]
rule_6 = ["KOO", "NOO"]
rule_7 = ["KO", "NO"]
rule_8 = ["K", "N"]
# Accumulates the "NER "/"NIRAI " formula for the word being processed;
# reset by word_convert() and appended to by formula_verify().
formula_formation = ""
# Remainder of the code string after the last matched prefix.
splitted_formation = ""
# Start index used when slicing rule prefixes (never changed; always 0).
initial_length = 0
def word_convert(word):
    """Transcribe a Tamil *word* into its K/N/O code and classify it.

    Each grapheme returned by ``character.character`` is converted via
    ``character.kuril``; the concatenated code string is then handed to
    ``formula_verify`` for metrical classification.

    :param word: the Tamil word to analyse.
    :return: the NER/NIRAI formula string from ``formula_verify``.
    :raises SystemExit: when the word contains a character the converter
        does not recognise.  (Bug fix: the old code printed the error and
        called the interactive-only ``exit()`` builtin, which terminates
        with status 0; ``raise SystemExit(msg)`` writes the message to
        stderr and exits with a nonzero status.)
    """
    global formula_formation
    formula_formation = ""
    converted_word = ""
    for letter in character.character(word):
        code = character.kuril(letter)
        # character.kuril signals an unknown character with False
        # (presumably it otherwise returns a code string — TODO confirm).
        if code is False:
            raise SystemExit("Unknown tamil characters found")
        converted_word += code
    return formula_verify(converted_word)
def formula_verify(actual_text):
    """Classify a K/N/O code string into its "NER "/"NIRAI " formula.

    The original implementation duplicated the same longest-prefix-first
    cascade once per input length (>=5, ==4, ==3, ==2, ==1); every branch
    checked the same rules in the same order, skipping those longer than
    the remaining text.  That cascade is collapsed here into one ordered
    rule table, preserving the exact match order and output.

    Appends each matched syllable name (plus a trailing space) to the
    module-global ``formula_formation`` and recurses on the remainder
    (also stored in the module-global ``splitted_formation``, as before).

    :param actual_text: code string built from "K"/"N"/"O" characters.
    :return: the accumulated ``formula_formation`` string, or ``False``
        when *actual_text* is empty.
    """
    global formula_formation
    global splitted_formation
    if len(actual_text) == 0:
        return False
    # Longest-match-first rule table: (prefix patterns, syllable name,
    # number of characters consumed).  Order replicates the original
    # branch order exactly.
    rule_table = [
        (rule_1, "NIRAI", 5),
        (rule_2, "NIRAI", 4),
        (rule_5, "NER", 4),
        (rule_3, "NIRAI", 3),
        (rule_6, "NER", 3),
        (rule_4, "NIRAI", 2),
        (rule_7, "NER", 2),
        (rule_8, "NER", 1),
    ]
    for patterns, syllable_name, cut_length in rule_table:
        if cut_length > len(actual_text):
            continue
        if actual_text[:cut_length] in patterns:
            formula_formation += syllable_name + " "
            splitted_formation = actual_text[cut_length:]
            formula_verify(splitted_formation)
            break
    # As in the original, an unmatched prefix simply stops consumption.
    return formula_formation
| 46.838565
| 90
| 0.490282
| 917
| 10,445
| 5.166848
| 0.077426
| 0.179401
| 0.182355
| 0.111439
| 0.857113
| 0.850781
| 0.845293
| 0.837062
| 0.837062
| 0.837062
| 0
| 0.016035
| 0.456678
| 10,445
| 222
| 91
| 47.04955
| 0.818855
| 0.004691
| 0
| 0.821782
| 0
| 0
| 0.018677
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009901
| false
| 0
| 0.004951
| 0
| 0.029703
| 0.004951
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2d07693669a430ec211c20d581992c8b269c417b
| 2,007
|
py
|
Python
|
Projeto-em-DRF/site_adocao/models.py
|
Projetointegradorunivesp/projeto_integrador_site
|
9d71b7c28b66e3c98210d4500454d0ef843a33c0
|
[
"MIT"
] | null | null | null |
Projeto-em-DRF/site_adocao/models.py
|
Projetointegradorunivesp/projeto_integrador_site
|
9d71b7c28b66e3c98210d4500454d0ef843a33c0
|
[
"MIT"
] | null | null | null |
Projeto-em-DRF/site_adocao/models.py
|
Projetointegradorunivesp/projeto_integrador_site
|
9d71b7c28b66e3c98210d4500454d0ef843a33c0
|
[
"MIT"
] | null | null | null |
from django.db import models
class Apoio(models.Model):
    # Supporting partner/sponsor displayed on the adoption site.
    nome = models.CharField(max_length=50)
    # Contact number; 11 chars fits a Brazilian phone (DDD + 9 digits).
    contato = models.CharField(max_length=11, default="")
    logo = models.ImageField(upload_to='media', blank=True)
    def __str__(self):
        # Human-readable label used by the Django admin and shell.
        return self.nome
class Cachorro(models.Model):
    """Dog available for adoption.

    Fix: ``max_length`` was passed to ``IntegerField``; Django ignores it
    on integer fields (system check fields.W122), so it is removed.  The
    database schema is unchanged.
    """
    numero_chip = models.IntegerField()  # identification microchip number
    nome = models.CharField(max_length=30)
    raca = models.CharField(max_length=30)  # breed
    sexo = models.CharField(max_length=1)   # single-letter sex code
    idade = models.IntegerField()           # age
    cor = models.CharField(max_length=30)   # coat colour
    descricao = models.CharField(max_length=100)
    foto1 = models.ImageField(upload_to='media', blank=True)
    foto2 = models.ImageField(upload_to='media', blank=True)
    foto3 = models.ImageField(upload_to='media', blank=True)
    def __str__(self):
        """Human-readable label used by the Django admin and shell."""
        return self.nome
class Gato(models.Model):
    """Cat available for adoption.

    Fix: ``max_length`` was passed to ``IntegerField``; Django ignores it
    on integer fields (system check fields.W122), so it is removed.  The
    database schema is unchanged.
    """
    numero_chip = models.IntegerField()  # identification microchip number
    nome = models.CharField(max_length=30)
    raca = models.CharField(max_length=30)  # breed
    sexo = models.CharField(max_length=1)   # single-letter sex code
    idade = models.IntegerField()           # age
    cor = models.CharField(max_length=30)   # coat colour
    descricao = models.CharField(max_length=100)
    foto1 = models.ImageField(upload_to='media', blank=True)
    foto2 = models.ImageField(upload_to='media', blank=True)
    foto3 = models.ImageField(upload_to='media', blank=True)
    def __str__(self):
        """Human-readable label used by the Django admin and shell."""
        return self.nome
class Outro(models.Model):
    """Any other animal available for adoption.

    Fix: ``max_length`` was passed to ``IntegerField``; Django ignores it
    on integer fields (system check fields.W122), so it is removed.  The
    database schema is unchanged.
    """
    numero_chip = models.IntegerField()  # identification microchip number
    nome = models.CharField(max_length=30)
    raca = models.CharField(max_length=30)  # breed
    sexo = models.CharField(max_length=1)   # single-letter sex code
    idade = models.IntegerField()           # age
    cor = models.CharField(max_length=30)   # coat colour
    descricao = models.CharField(max_length=100)
    foto1 = models.ImageField(upload_to='media', blank=True)
    foto2 = models.ImageField(upload_to='media', blank=True)
    foto3 = models.ImageField(upload_to='media', blank=True)
    def __str__(self):
        """Human-readable label used by the Django admin and shell."""
        return self.nome
| 32.370968
| 60
| 0.713503
| 267
| 2,007
| 5.168539
| 0.172285
| 0.15
| 0.221739
| 0.295652
| 0.921739
| 0.901449
| 0.901449
| 0.901449
| 0.901449
| 0.901449
| 0
| 0.029271
| 0.165919
| 2,007
| 61
| 61
| 32.901639
| 0.795102
| 0
| 0
| 0.826087
| 0
| 0
| 0.024938
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.021739
| 0.086957
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 10
|
2d314fb6ed78c5e1bd9ad42df1d5b3bd7cc0061c
| 33,938
|
py
|
Python
|
openprocurement/auction/texas/tests/unit/datasources/test_openprocurement_datasource.py
|
oleksiyVeretiuk/openprocurement.auction.gong
|
783ea355c1633e084aaf26a1d6128cc77ae6f642
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/auction/texas/tests/unit/datasources/test_openprocurement_datasource.py
|
oleksiyVeretiuk/openprocurement.auction.gong
|
783ea355c1633e084aaf26a1d6128cc77ae6f642
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/auction/texas/tests/unit/datasources/test_openprocurement_datasource.py
|
oleksiyVeretiuk/openprocurement.auction.gong
|
783ea355c1633e084aaf26a1d6128cc77ae6f642
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import mock
from uuid import uuid4
from openprocurement.auction.texas.datasource import OpenProcurementAPIDataSource
class TestOpenProcurementAPIDataSource(unittest.TestCase):
    """Shared fixture: a minimal datasource configuration for every suite."""

    datasource_class = OpenProcurementAPIDataSource

    def setUp(self):
        # Minimal worker configuration accepted by the datasource
        # constructor; individual suites extend it per test.
        self.config = dict(
            resource_api_server='https://lb.api-sandbox.ea.openprocurement.org/',
            resource_api_version='2.4',
            resource_name='auction',
            auction_id='1' * 32,
            resource_api_token='api_token',
            AUCTIONS_URL='localhost:8090',
            HASH_SECRET='secret',
        )
class TestInit(TestOpenProcurementAPIDataSource):
    """__init__ behaviour: session wiring with and without the document service."""
    def setUp(self):
        super(TestInit, self).setUp()
        # Patch RequestsSession in the datasource module so every session
        # the constructor creates is this single MagicMock.
        self.request_session = mock.MagicMock()
        self.patch_request_session = mock.patch('openprocurement.auction.texas.datasource.RequestsSession')
        self.mocked_request_session = self.patch_request_session.start()
        self.mocked_request_session.return_value = self.request_session
    def tearDown(self):
        self.patch_request_session.stop()
    def test_init_with_docservice(self):
        """With the document service enabled: api_url/token set and two sessions built."""
        self.config['with_document_service'] = True
        ds_service_config = {
            'username': 'username',
            'password': 'password',
            'url': 'http://docservice_url'
        }
        self.config['DOCUMENT_SERVICE'] = ds_service_config
        datasource = self.datasource_class(self.config)
        self.assertEqual(datasource.api_token, self.config['resource_api_token'])
        # api_url is assembled as <server>api/<version>/<resource>/<auction id>.
        url = '{}api/{}/{}/{}'.format(
            self.config['resource_api_server'],
            self.config['resource_api_version'],
            self.config['resource_name'],
            self.config['auction_id']
        )
        self.assertEqual(datasource.api_url, url)
        self.assertIs(datasource.session, self.request_session)
        self.assertIs(datasource.session_ds, self.request_session)
        # One session for the API plus one for the document service.
        self.assertEqual(self.mocked_request_session.call_count, 2)
    def test_init_without_docservice(self):
        """Without the document service: one session only and no session_ds attribute."""
        self.config['with_document_service'] = False
        datasource = self.datasource_class(self.config)
        self.assertEqual(datasource.api_token, self.config['resource_api_token'])
        url = '{}api/{}/{}/{}'.format(
            self.config['resource_api_server'],
            self.config['resource_api_version'],
            self.config['resource_name'],
            self.config['auction_id']
        )
        self.assertEqual(datasource.api_url, url)
        self.assertEqual(self.config['AUCTIONS_URL'], datasource.auction_url)
        self.assertEqual(self.config['HASH_SECRET'], datasource.hash_secret)
        self.assertIs(datasource.session, self.request_session)
        # session_ds must not even exist when the DS is disabled.
        self.assertIs(hasattr(datasource, 'session_ds'), False)
        self.assertEqual(self.mocked_request_session.call_count, 1)
class TestUpdateSourceObject(TestOpenProcurementAPIDataSource):
    """update_source_object(): orchestration of history upload, API post and bid helpers."""
    def setUp(self):
        super(TestUpdateSourceObject, self).setUp()
        self.datasource = self.datasource_class(self.config)
        # Fixed payloads threaded through update_source_object in every test.
        self.external_data = {'external': 'data'}
        self.db_document = {'db': 'document'}
        self.history_document = {'auction': 'protocol'}
        self.request_session = mock.MagicMock()
        # Patch the module-level helpers and the datasource's own methods so
        # each collaborator can be asserted on independently.
        self.patch_get_active_bids = mock.patch('openprocurement.auction.texas.datasource.get_active_bids')
        self.patch_open_bidders_name = mock.patch('openprocurement.auction.texas.datasource.open_bidders_name')
        self.patch_upload_history = mock.patch.object(self.datasource, 'upload_auction_history_document')
        self.patch_post_results = mock.patch.object(self.datasource, '_post_results_data')
        self.mocked_get_active_bids = self.patch_get_active_bids.start()
        self.mocked_open_bidders_name = self.patch_open_bidders_name.start()
        self.mocked_upload_history = self.patch_upload_history.start()
        self.mocked_post_results = self.patch_post_results.start()
    def tearDown(self):
        self.patch_get_active_bids.stop()
        self.patch_open_bidders_name.stop()
        self.patch_upload_history.stop()
        self.patch_post_results.stop()
    def test_update_source_object_with_bad_document_upload(self):
        """First history upload fails (None): helpers still run, but the result is None."""
        self.mocked_upload_history.return_value = None
        post_response_data = {'response': 'data'}
        self.mocked_post_results.return_value = post_response_data
        bids_result_data = {'bids': 'result'}
        self.mocked_get_active_bids.return_value = bids_result_data
        new_db_document = {'db_document': 'with opened names'}
        self.mocked_open_bidders_name.return_value = new_db_document
        result = self.datasource.update_source_object(self.external_data, self.db_document, self.history_document)
        self.assertEqual(result, None)
        self.assertEqual(self.mocked_upload_history.call_count, 1)
        self.mocked_upload_history.assert_called_with(self.history_document)
        self.assertEqual(self.mocked_post_results.call_count, 1)
        self.mocked_post_results.assert_called_with(self.external_data, self.db_document)
        self.assertEqual(self.mocked_get_active_bids.call_count, 1)
        self.mocked_get_active_bids.assert_called_with(post_response_data)
        self.assertEqual(self.mocked_open_bidders_name.call_count, 1)
        self.mocked_open_bidders_name.assert_called_with(self.db_document, bids_result_data)
    def test_update_source_object_with_bad_api_post(self):
        """API post fails (None): the bid helpers are never invoked."""
        doc_id = '1' * 32
        self.mocked_upload_history.return_value = doc_id
        self.mocked_post_results.return_value = None
        result = self.datasource.update_source_object(self.external_data, self.db_document, self.history_document)
        self.assertEqual(result, None)
        self.assertEqual(self.mocked_upload_history.call_count, 1)
        self.mocked_upload_history.assert_called_with(self.history_document)
        self.assertEqual(self.mocked_post_results.call_count, 1)
        self.mocked_post_results.assert_called_with(self.external_data, self.db_document)
        self.assertEqual(self.mocked_get_active_bids.call_count, 0)
        self.assertEqual(self.mocked_open_bidders_name.call_count, 0)
    def test_update_source_object_with_bad_document_upload_and_api_post(self):
        """Both upload and post fail: result is None and bid helpers stay untouched."""
        self.mocked_upload_history.return_value = None
        self.mocked_post_results.return_value = None
        result = self.datasource.update_source_object(self.external_data, self.db_document, self.history_document)
        self.assertEqual(result, None)
        self.assertEqual(self.mocked_upload_history.call_count, 1)
        self.mocked_upload_history.assert_called_with(self.history_document)
        self.assertEqual(self.mocked_post_results.call_count, 1)
        self.mocked_post_results.assert_called_with(self.external_data, self.db_document)
        self.assertEqual(self.mocked_get_active_bids.call_count, 0)
        self.assertEqual(self.mocked_open_bidders_name.call_count, 0)
    def test_update_source_object_with_second_bad_document_upload(self):
        """Second (post-results) upload fails: the opened document is still returned."""
        doc_id = '1' * 32
        # First upload succeeds, the retry after posting results fails.
        self.mocked_upload_history.side_effect = iter([
            doc_id,
            None
        ])
        post_response_data = {'response': 'data'}
        self.mocked_post_results.return_value = post_response_data
        bids_result_data = {'bids': 'result'}
        self.mocked_get_active_bids.return_value = bids_result_data
        new_db_document = {'db_document': 'with opened names'}
        self.mocked_open_bidders_name.return_value = new_db_document
        result = self.datasource.update_source_object(self.external_data, self.db_document, self.history_document)
        self.assertEqual(result, new_db_document)
        self.assertEqual(self.mocked_upload_history.call_count, 2)
        # The second upload call carries the doc id from the first one.
        self.mocked_upload_history.assert_called_with(self.history_document, doc_id)
        self.assertEqual(self.mocked_post_results.call_count, 1)
        self.mocked_post_results.assert_called_with(self.external_data, self.db_document)
        self.assertEqual(self.mocked_get_active_bids.call_count, 1)
        self.mocked_get_active_bids.assert_called_with(post_response_data)
        self.assertEqual(self.mocked_open_bidders_name.call_count, 1)
        self.mocked_open_bidders_name.assert_called_with(self.db_document, bids_result_data)
    def test_successful_update(self):
        """Happy path: both uploads succeed and the opened document is returned."""
        doc_id = '1' * 32
        self.mocked_upload_history.side_effect = iter([
            doc_id,
            doc_id
        ])
        post_response_data = {'response': 'data'}
        self.mocked_post_results.return_value = post_response_data
        bids_result_data = {'bids': 'result'}
        self.mocked_get_active_bids.return_value = bids_result_data
        new_db_document = {'db_document': 'with opened names'}
        self.mocked_open_bidders_name.return_value = new_db_document
        result = self.datasource.update_source_object(self.external_data, self.db_document, self.history_document)
        self.assertEqual(result, new_db_document)
        self.assertEqual(self.mocked_upload_history.call_count, 2)
        self.mocked_upload_history.assert_called_with(self.history_document, doc_id)
        self.assertEqual(self.mocked_post_results.call_count, 1)
        self.mocked_post_results.assert_called_with(self.external_data, self.db_document)
        self.assertEqual(self.mocked_get_active_bids.call_count, 1)
        self.mocked_get_active_bids.assert_called_with(post_response_data)
        self.assertEqual(self.mocked_open_bidders_name.call_count, 1)
        self.mocked_open_bidders_name.assert_called_with(self.db_document, bids_result_data)
class TestPostResultData(TestOpenProcurementAPIDataSource):
    """_post_results_data(): bid rewriting and the resulting make_request call."""
    def setUp(self):
        super(TestPostResultData, self).setUp()
        self.datasource = self.datasource_class(self.config)
        self.session = mock.MagicMock()
        self.datasource.session = self.session
        self.db_document = {'results': []}
        self.request_session = mock.MagicMock()
        # Patch the module helpers used by _post_results_data.
        self.patch_make_request = mock.patch('openprocurement.auction.texas.datasource.make_request')
        self.patch_generate_request_id = mock.patch('openprocurement.auction.texas.datasource.generate_request_id')
        self.patch_get_latest_bid_for_bidder = mock.patch('openprocurement.auction.texas.datasource.get_latest_bid_for_bidder')
        self.mocked_make_request = self.patch_make_request.start()
        self.mocked_generate_request_id = self.patch_generate_request_id.start()
        # Pin the generated request id so the make_request call can be
        # asserted exactly.
        self.request_id = uuid4().hex
        self.mocked_generate_request_id.return_value = self.request_id
        self.mocked_get_latest_bid_for_bidder = self.patch_get_latest_bid_for_bidder.start()
    def tearDown(self):
        self.patch_make_request.stop()
        self.patch_generate_request_id.stop()
        self.patch_get_latest_bid_for_bidder.stop()
    def test_post_results_data_with_bids_in_active(self):
        """Active bids get their amount/date replaced by the bidder's latest bid."""
        external_data = {'data': {
            'bids': [
                {
                    'status': 'draft',
                },
                {
                    'value': {'amount': 1000},
                    'date': 'bid create date',
                    'status': 'active',
                    'id': '2' * 32
                },
            ]
        }}
        last_bid_of_active_bidder = {
            'amount': 10000,
            'time': 'time of bid',
            'id': '2' * 32
        }
        self.mocked_get_latest_bid_for_bidder.return_value = last_bid_of_active_bidder
        # Expected payload: draft bid untouched, active bid updated.
        data_with_results = {
            'data': {
                'bids': [
                    {
                        'status': 'draft',
                    },
                    {
                        'value': {'amount': last_bid_of_active_bidder['amount']},
                        'date': last_bid_of_active_bidder['time'],
                        'status': 'active',
                        'id': '2' * 32
                    }
                ]
            }
        }
        self.datasource._post_results_data(external_data, self.db_document)
        self.assertEqual(self.mocked_get_latest_bid_for_bidder.call_count, 1)
        self.mocked_get_latest_bid_for_bidder.assert_called_with(self.db_document['results'], last_bid_of_active_bidder['id'])
        self.assertEqual(self.mocked_make_request.call_count, 1)
        self.mocked_make_request.assert_called_with(
            self.datasource.api_url + '/auction',
            data=data_with_results,
            user=self.datasource.api_token,
            method='post',
            request_id=self.request_id,
            session=self.datasource.session
        )
    def test_post_results_data_with_bid_without_status(self):
        """Bids lacking a status field are treated the same as active ones."""
        external_data = {'data': {
            'bids': [
                {
                    'status': 'draft',
                },
                {
                    'value': {'amount': 1000},
                    'date': 'bid create date',
                    'id': '2' * 32
                },
            ]
        }}
        last_bid_of_active_bidder = {
            'amount': 10000,
            'time': 'time of bid',
            'id': '2' * 32
        }
        self.mocked_get_latest_bid_for_bidder.return_value = last_bid_of_active_bidder
        data_with_results = {
            'data': {
                'bids': [
                    {
                        'status': 'draft',
                    },
                    {
                        'value': {'amount': last_bid_of_active_bidder['amount']},
                        'date': last_bid_of_active_bidder['time'],
                        'id': '2' * 32
                    }
                ]
            }
        }
        self.datasource._post_results_data(external_data, self.db_document)
        self.assertEqual(self.mocked_get_latest_bid_for_bidder.call_count, 1)
        self.mocked_get_latest_bid_for_bidder.assert_called_with(
            self.db_document['results'],
            last_bid_of_active_bidder['id']
        )
        self.assertEqual(self.mocked_make_request.call_count, 1)
        self.mocked_make_request.assert_called_with(
            self.datasource.api_url + '/auction',
            data=data_with_results,
            user=self.datasource.api_token,
            method='post',
            request_id=self.request_id,
            session=self.datasource.session
        )
class TestUploadHistoryDocument(TestOpenProcurementAPIDataSource):
    """upload_auction_history_document(): dispatch to the with/without-DS uploaders."""
    def setUp(self):
        super(TestUploadHistoryDocument, self).setUp()
        self.history_data = {'auction': 'protocol'}
        # Patch RequestsSession so constructing the datasource builds mocks.
        self.patch_request_session = mock.patch('openprocurement.auction.texas.datasource.RequestsSession')
        self.mocked_request_session = self.patch_request_session.start()
        self.request_session = mock.MagicMock()
        self.mocked_request_session.return_value = self.request_session
        self.datasource = self.datasource_class(self.config)
        # Stub both concrete upload paths on the instance.
        self.patch_upload_audit_with_ds = mock.patch.object(
            self.datasource,
            '_upload_audit_file_with_document_service'
        )
        self.patch_upload_audit_without_ds = mock.patch.object(
            self.datasource,
            '_upload_audit_file_without_document_service'
        )
        self.mocked_upload_audit_with_ds = self.patch_upload_audit_with_ds.start()
        self.mocked_upload_audit_without_ds = self.patch_upload_audit_without_ds.start()
    def tearDown(self):
        # Fix: the old tearDown also called self.mocked_request_session.stop(),
        # i.e. .stop() on the MagicMock returned by patcher.start() — a silent
        # no-op, not a patcher stop.  Only the real patchers are stopped here.
        self.patch_request_session.stop()
        self.patch_upload_audit_with_ds.stop()
        self.patch_upload_audit_without_ds.stop()
    def test_upload_history_document_with_ds(self):
        """DS enabled: only the with-DS uploader runs, with and without a doc id."""
        self.datasource.with_document_service = True
        self.mocked_upload_audit_with_ds.return_value = None
        result = self.datasource.upload_auction_history_document(self.history_data)
        self.assertIsNone(result)
        self.assertEqual(self.mocked_upload_audit_with_ds.call_count, 1)
        self.mocked_upload_audit_with_ds.assert_called_with(self.history_data, None)
        self.assertEqual(self.mocked_upload_audit_without_ds.call_count, 0)
        # With doc id
        doc_id = '1' * 32
        result = self.datasource.upload_auction_history_document(self.history_data, doc_id)
        self.assertIsNone(result)
        self.assertEqual(self.mocked_upload_audit_with_ds.call_count, 2)
        self.mocked_upload_audit_with_ds.assert_called_with(self.history_data, doc_id)
        self.assertEqual(self.mocked_upload_audit_without_ds.call_count, 0)
    def test_upload_history_document_without_ds(self):
        """DS disabled: only the without-DS uploader runs, with and without a doc id."""
        self.datasource.with_document_service = False
        self.mocked_upload_audit_without_ds.return_value = None
        result = self.datasource.upload_auction_history_document(self.history_data)
        self.assertIsNone(result)
        self.assertEqual(self.mocked_upload_audit_without_ds.call_count, 1)
        self.mocked_upload_audit_without_ds.assert_called_with(self.history_data, None)
        self.assertEqual(self.mocked_upload_audit_with_ds.call_count, 0)
        # With doc id
        doc_id = '1' * 32
        result = self.datasource.upload_auction_history_document(self.history_data, doc_id)
        self.assertIsNone(result)
        self.assertEqual(self.mocked_upload_audit_without_ds.call_count, 2)
        self.mocked_upload_audit_without_ds.assert_called_with(self.history_data, doc_id)
        self.assertEqual(self.mocked_upload_audit_with_ds.call_count, 0)
    def test_successful_upload_with_ds(self):
        """DS enabled, upload succeeds: the uploader's doc id is returned."""
        self.datasource.with_document_service = True
        doc_id = '1' * 32
        self.mocked_upload_audit_with_ds.return_value = doc_id
        result = self.datasource.upload_auction_history_document(self.history_data)
        self.assertEqual(result, doc_id)
        self.assertEqual(self.mocked_upload_audit_with_ds.call_count, 1)
        self.mocked_upload_audit_with_ds.assert_called_with(self.history_data, None)
        self.assertEqual(self.mocked_upload_audit_without_ds.call_count, 0)
    def test_successful_upload_without_ds(self):
        """DS disabled, upload succeeds: the uploader's doc id is returned."""
        self.datasource.with_document_service = False
        doc_id = '1' * 32
        self.mocked_upload_audit_without_ds.return_value = doc_id
        result = self.datasource.upload_auction_history_document(self.history_data)
        self.assertEqual(result, doc_id)
        self.assertEqual(self.mocked_upload_audit_without_ds.call_count, 1)
        self.mocked_upload_audit_without_ds.assert_called_with(self.history_data, None)
        self.assertEqual(self.mocked_upload_audit_with_ds.call_count, 0)
class TestUploadFileWithDS(TestOpenProcurementAPIDataSource):
    """Tests for ``_upload_audit_file_with_document_service``.

    The method is expected to call ``make_request`` twice: first posting
    the yaml audit file to the document service, then attaching the DS
    response to the auction through the API.
    """

    def setUp(self):
        """Enable the document service and patch the network helpers."""
        super(TestUploadFileWithDS, self).setUp()
        self.ds_service_config = {
            'username': 'username',
            'password': 'password',
            'url': 'http://docservice_url'
        }
        self.config['DOCUMENT_SERVICE'] = self.ds_service_config
        self.config['with_document_service'] = True
        self.datasource = self.datasource_class(self.config)
        self.history_data = {'auction': 'protocol'}
        # Separate mock sessions: one for the API, one for the DS.
        self.session = mock.MagicMock()
        self.session_ds = mock.MagicMock()
        self.datasource.session = self.session
        self.datasource.session_ds = self.session_ds
        self.patch_make_request = mock.patch('openprocurement.auction.texas.datasource.make_request')
        self.patch_yaml_dump = mock.patch('openprocurement.auction.texas.datasource.yaml_dump')
        self.patch_generate_request_id = mock.patch('openprocurement.auction.texas.datasource.generate_request_id')
        self.mock_make_request = self.patch_make_request.start()
        self.mock_yaml_dump = self.patch_yaml_dump.start()
        self.yaml_doc = {'yaml': 'data'}
        self.mock_yaml_dump.return_value = self.yaml_doc
        self.mock_generate_request_id = self.patch_generate_request_id.start()
        self.request_id = uuid4().hex
        self.mock_generate_request_id.return_value = self.request_id

    def tearDown(self):
        """Stop every patch started in setUp (reverse order)."""
        self.patch_generate_request_id.stop()
        self.patch_yaml_dump.stop()
        self.patch_make_request.stop()

    def test_upload_with_doc_id(self):
        """An explicit doc_id leads to a PUT on /documents/<doc_id>."""
        success_put_data_response = {'data': {'id': '1' * 32}}
        ds_response = {'ds': 'response'}
        # First response is from the DS, second from the API.
        self.mock_make_request.side_effect = iter([
            ds_response,
            success_put_data_response
        ])
        doc_id = uuid4().hex
        result = self.datasource._upload_audit_file_with_document_service(self.history_data, doc_id)
        self.assertEqual(result, success_put_data_response['data']['id'])
        self.assertEqual(self.mock_make_request.call_count, 2)
        ds_request = {
            'files': {'file': ('audit_{}.yaml'.format(self.config['auction_id']), self.yaml_doc)},
            'method': 'post',
            'user': self.ds_service_config['username'],
            'password': self.ds_service_config['password'],
            'session': self.session_ds,
            'retry_count': 3
        }
        # assert_called_with only inspects the most recent call, so the
        # first (document-service) call is checked via call_args_list.
        self.assertEqual(
            self.mock_make_request.call_args_list[0][0][0],
            self.ds_service_config['url']
        )
        self.assertEqual(
            self.mock_make_request.call_args_list[0][1],
            ds_request
        )
        self.mock_make_request.assert_called_with(
            self.datasource.api_url + '/documents/{}'.format(doc_id),
            data=ds_response,
            user=self.datasource.api_token,
            method='put',
            request_id=self.request_id,
            session=self.session,
            retry_count=2
        )

    def test_upload_without_doc_id(self):
        """Without a doc_id the API call is a POST on /documents."""
        success_put_data_response = {'data': {'id': '1' * 32}}
        ds_response = {'ds': 'response'}
        self.mock_make_request.side_effect = iter([
            ds_response,
            success_put_data_response
        ])
        result = self.datasource._upload_audit_file_with_document_service(self.history_data)
        self.assertEqual(result, success_put_data_response['data']['id'])
        self.assertEqual(self.mock_make_request.call_count, 2)
        ds_request = {
            'files': {'file': ('audit_{}.yaml'.format(self.config['auction_id']), self.yaml_doc)},
            'method': 'post',
            'user': self.ds_service_config['username'],
            'password': self.ds_service_config['password'],
            'session': self.session_ds,
            'retry_count': 3
        }
        # assert_called_with only inspects the most recent call, so the
        # first (document-service) call is checked via call_args_list.
        self.assertEqual(
            self.mock_make_request.call_args_list[0][0][0],
            self.ds_service_config['url']
        )
        self.assertEqual(
            self.mock_make_request.call_args_list[0][1],
            ds_request
        )
        self.mock_make_request.assert_called_with(
            self.datasource.api_url + '/documents',
            data=ds_response,
            user=self.datasource.api_token,
            method='post',
            request_id=self.request_id,
            session=self.session,
            retry_count=2
        )

    def test_upload_with_bad_api_request(self):
        """A failed API call (None response) makes the method return None."""
        ds_response = {'ds': 'response'}
        self.mock_make_request.side_effect = iter([
            ds_response,
            None
        ])
        result = self.datasource._upload_audit_file_with_document_service(self.history_data)
        self.assertEqual(result, None)
        self.assertEqual(self.mock_make_request.call_count, 2)
        ds_request = {
            'files': {'file': ('audit_{}.yaml'.format(self.config['auction_id']), self.yaml_doc)},
            'method': 'post',
            'user': self.ds_service_config['username'],
            'password': self.ds_service_config['password'],
            'session': self.session_ds,
            'retry_count': 3
        }
        # assert_called_with only inspects the most recent call, so the
        # first (document-service) call is checked via call_args_list.
        self.assertEqual(
            self.mock_make_request.call_args_list[0][0][0],
            self.ds_service_config['url']
        )
        self.assertEqual(
            self.mock_make_request.call_args_list[0][1],
            ds_request
        )
        self.mock_make_request.assert_called_with(
            self.datasource.api_url + '/documents',
            data=ds_response,
            user=self.datasource.api_token,
            method='post',
            request_id=self.request_id,
            session=self.session,
            retry_count=2
        )
class TestUploadFileWithoutDS(TestOpenProcurementAPIDataSource):
    """Tests for ``_upload_audit_file_without_document_service``.

    Here the audit file is sent straight to the API in a single
    ``make_request`` call (no document-service round trip).
    """

    def setUp(self):
        """Patch the network helpers; document service stays disabled."""
        super(TestUploadFileWithoutDS, self).setUp()
        self.datasource = self.datasource_class(self.config)
        self.history_data = {'auction': 'protocol'}
        self.session = mock.MagicMock()
        self.datasource.session = self.session
        self.patch_make_request = mock.patch('openprocurement.auction.texas.datasource.make_request')
        self.patch_yaml_dump = mock.patch('openprocurement.auction.texas.datasource.yaml_dump')
        self.patch_generate_request_id = mock.patch('openprocurement.auction.texas.datasource.generate_request_id')
        self.mock_make_request = self.patch_make_request.start()
        self.mock_yaml_dump = self.patch_yaml_dump.start()
        self.yaml_doc = {'yaml': 'data'}
        self.mock_yaml_dump.return_value = self.yaml_doc
        self.mock_generate_request_id = self.patch_generate_request_id.start()
        self.request_id = uuid4().hex
        self.mock_generate_request_id.return_value = self.request_id

    def tearDown(self):
        """Stop every patch started in setUp (reverse order)."""
        self.patch_generate_request_id.stop()
        self.patch_yaml_dump.stop()
        self.patch_make_request.stop()

    def test_upload_with_doc_id(self):
        """An explicit doc_id leads to a PUT on /documents/<doc_id>."""
        success_put_data_response = {'data': {'id': '1' * 32}}
        self.mock_make_request.side_effect = iter([
            success_put_data_response
        ])
        doc_id = uuid4().hex
        result = self.datasource._upload_audit_file_without_document_service(self.history_data, doc_id)
        self.assertEqual(result, success_put_data_response['data']['id'])
        self.assertEqual(self.mock_make_request.call_count, 1)
        files = {'file': ('audit_{}.yaml'.format(self.config['auction_id']), self.yaml_doc)}
        self.mock_make_request.assert_called_with(
            self.datasource.api_url + '/documents/{}'.format(doc_id),
            files=files,
            user=self.datasource.api_token,
            method='put',
            request_id=self.request_id,
            session=self.session,
            retry_count=2
        )

    def test_upload_without_doc_id(self):
        """Without a doc_id the API call is a POST on /documents."""
        success_put_data_response = {'data': {'id': '1' * 32}}
        self.mock_make_request.side_effect = iter([
            success_put_data_response
        ])
        result = self.datasource._upload_audit_file_without_document_service(self.history_data)
        self.assertEqual(result, success_put_data_response['data']['id'])
        self.assertEqual(self.mock_make_request.call_count, 1)
        files = {'file': ('audit_{}.yaml'.format(self.config['auction_id']), self.yaml_doc)}
        self.mock_make_request.assert_called_with(
            self.datasource.api_url + '/documents',
            files=files,
            user=self.datasource.api_token,
            method='post',
            request_id=self.request_id,
            session=self.session,
            retry_count=2
        )

    def test_upload_with_bad_api_request(self):
        """A failed API call (None response) makes the method return None."""
        self.mock_make_request.side_effect = iter([
            None
        ])
        result = self.datasource._upload_audit_file_without_document_service(self.history_data)
        self.assertEqual(result, None)
        self.assertEqual(self.mock_make_request.call_count, 1)
        files = {'file': ('audit_{}.yaml'.format(self.config['auction_id']), self.yaml_doc)}
        self.mock_make_request.assert_called_with(
            self.datasource.api_url + '/documents',
            files=files,
            user=self.datasource.api_token,
            method='post',
            request_id=self.request_id,
            session=self.session,
            retry_count=2
        )
class TestSetParticipationUrls(TestOpenProcurementAPIDataSource):
    """Tests for ``set_participation_urls``.

    A participation URL (with a bidder hash) must be generated for every
    bid that is active or has no status; bids in any other status are
    patched without a URL and without computing a hash.
    """

    def setUp(self):
        """Patch the network helper, request-id and hash generators."""
        super(TestSetParticipationUrls, self).setUp()
        self.datasource = self.datasource_class(self.config)
        self.history_data = {'auction': 'protocol'}
        self.session = mock.MagicMock()
        self.datasource.session = self.session
        self.patch_make_request = mock.patch('openprocurement.auction.texas.datasource.make_request')
        self.patch_generate_request_id = mock.patch('openprocurement.auction.texas.datasource.generate_request_id')
        self.patch_calculate_hash = mock.patch('openprocurement.auction.texas.datasource.calculate_hash')
        self.mock_make_request = self.patch_make_request.start()
        self.mock_calculate_hash = self.patch_calculate_hash.start()
        self.hash = 'hash'
        self.mock_calculate_hash.return_value = self.hash
        self.mock_generate_request_id = self.patch_generate_request_id.start()
        self.request_id = uuid4().hex
        self.mock_generate_request_id.return_value = self.request_id

    def tearDown(self):
        """Stop every patch started in setUp.

        Bug fix: the original tearDown never stopped
        ``patch_calculate_hash``, leaking the active patch into any test
        that runs after this class.
        """
        self.patch_calculate_hash.stop()
        self.patch_generate_request_id.stop()
        self.patch_make_request.stop()

    def test_bid_in_active_status(self):
        """An active bid gets a participation URL with the bidder hash."""
        processed_bid = {
            'id': '1' * 32,
            'status': 'active'
        }
        external_data = {
            'data': {
                'bids': [processed_bid]
            }
        }
        participation_url = self.datasource.auction_url + '/login?bidder_id={}&hash={}'.format(
            '1' * 32,
            self.hash
        )
        expected_patch = {
            'data': {
                'auctionUrl': self.datasource.auction_url,
                'bids': [
                    {
                        'id': processed_bid['id'],
                        'participationUrl': participation_url
                    }
                ]
            }
        }
        self.datasource.set_participation_urls(external_data)
        self.assertEqual(self.mock_generate_request_id.call_count, 1)
        self.assertEqual(self.mock_calculate_hash.call_count, 1)
        self.mock_calculate_hash.assert_called_with(processed_bid['id'], self.datasource.hash_secret)
        self.assertEqual(self.mock_make_request.call_count, 1)
        self.mock_make_request.assert_called_with(
            self.datasource.api_url + '/auction',
            expected_patch,
            user=self.datasource.api_token,
            request_id=self.request_id,
            session=self.session
        )

    # NOTE(review): "withous" is a typo for "without"; the name is kept
    # unchanged so existing test selections keep working.
    def test_bid_withous_status(self):
        """A bid lacking a status is treated like an active one."""
        processed_bid = {
            'id': '1' * 32,
        }
        external_data = {
            'data': {
                'bids': [processed_bid]
            }
        }
        participation_url = self.datasource.auction_url + '/login?bidder_id={}&hash={}'.format(
            '1' * 32,
            self.hash
        )
        expected_patch = {
            'data': {
                'auctionUrl': self.datasource.auction_url,
                'bids': [
                    {
                        'id': processed_bid['id'],
                        'participationUrl': participation_url
                    }
                ]
            }
        }
        self.datasource.set_participation_urls(external_data)
        self.assertEqual(self.mock_generate_request_id.call_count, 1)
        self.assertEqual(self.mock_calculate_hash.call_count, 1)
        self.mock_calculate_hash.assert_called_with(processed_bid['id'], self.datasource.hash_secret)
        self.assertEqual(self.mock_make_request.call_count, 1)
        self.mock_make_request.assert_called_with(
            self.datasource.api_url + '/auction',
            expected_patch,
            user=self.datasource.api_token,
            request_id=self.request_id,
            session=self.session
        )

    def test_bid_in_not_active_status(self):
        """A non-active bid is patched without a URL; no hash computed."""
        processed_bid = {
            'id': '1' * 32,
            'status': 'not_active'
        }
        external_data = {
            'data': {
                'bids': [processed_bid]
            }
        }
        expected_patch = {
            'data': {
                'auctionUrl': self.datasource.auction_url,
                'bids': [
                    {
                        'id': processed_bid['id'],
                    }
                ]
            }
        }
        self.datasource.set_participation_urls(external_data)
        self.assertEqual(self.mock_generate_request_id.call_count, 1)
        self.assertEqual(self.mock_calculate_hash.call_count, 0)
        self.assertEqual(self.mock_make_request.call_count, 1)
        self.mock_make_request.assert_called_with(
            self.datasource.api_url + '/auction',
            expected_patch,
            user=self.datasource.api_token,
            request_id=self.request_id,
            session=self.session
        )
def suite():
    """Collect every datasource test case into one test suite.

    Order is preserved so the run sequence matches the original.
    """
    cases = (
        TestInit,
        TestUpdateSourceObject,
        TestPostResultData,
        TestUploadHistoryDocument,
        TestUploadFileWithDS,
        TestUploadFileWithoutDS,
        TestSetParticipationUrls,
    )
    collected = unittest.TestSuite()
    for case in cases:
        collected.addTest(unittest.makeSuite(case))
    return collected
| 36.969499
| 127
| 0.650598
| 3,905
| 33,938
| 5.282714
| 0.04251
| 0.04896
| 0.056183
| 0.046052
| 0.911048
| 0.871976
| 0.84357
| 0.815842
| 0.783315
| 0.747297
| 0
| 0.006957
| 0.250368
| 33,938
| 917
| 128
| 37.009815
| 0.803899
| 0.007484
| 0
| 0.711207
| 0
| 0
| 0.089316
| 0.033553
| 0
| 0
| 0
| 0
| 0.176724
| 1
| 0.054598
| false
| 0.007184
| 0.005747
| 0
| 0.074713
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
74a4a622b98c53feacd4fd3923bb7e01a4321604
| 74
|
py
|
Python
|
arm64_tester/__init__.py
|
luist18/mast-tool
|
dfa891a95407c6cb4ea58d41237cfa0b974887f7
|
[
"MIT"
] | 1
|
2021-06-09T03:33:03.000Z
|
2021-06-09T03:33:03.000Z
|
arm64_tester/__init__.py
|
luist18/mast-tool
|
dfa891a95407c6cb4ea58d41237cfa0b974887f7
|
[
"MIT"
] | null | null | null |
arm64_tester/__init__.py
|
luist18/mast-tool
|
dfa891a95407c6cb4ea58d41237cfa0b974887f7
|
[
"MIT"
] | null | null | null |
from arm64_tester.test import Test
from arm64_tester.tester import Tester
| 24.666667
| 38
| 0.864865
| 12
| 74
| 5.166667
| 0.416667
| 0.290323
| 0.483871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0.108108
| 74
| 2
| 39
| 37
| 0.878788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7ae438a16f81fe6c2cebdf304b4264f17d5749e7
| 30,038
|
py
|
Python
|
ParallelSolvers.py
|
neu-spiral/GraphMatching
|
84729b9d793bf8d7aca99dcc1751b99222d8cdc9
|
[
"MIT"
] | null | null | null |
ParallelSolvers.py
|
neu-spiral/GraphMatching
|
84729b9d793bf8d7aca99dcc1751b99222d8cdc9
|
[
"MIT"
] | null | null | null |
ParallelSolvers.py
|
neu-spiral/GraphMatching
|
84729b9d793bf8d7aca99dcc1751b99222d8cdc9
|
[
"MIT"
] | null | null | null |
#from cvxopt import spmatrix,matrix
#from cvxopt.solvers import qp,lp
from helpers import identityHash,swap,mergedicts,identityHash
import numpy as np
from numpy.linalg import solve as linearSystemSolver,inv
import logging
from debug import logger,Sij_test
from numpy.linalg import matrix_rank
from pprint import pformat
from time import time
import argparse
from pyspark import SparkContext
from operator import add,and_
from proxOp import pnormOp,pnorm_proxop, L1normOp, EuclidiannormOp
from LocalSolvers import LocalL1Solver, LocalL2Solver
class ParallelSolver():
    """ A class for a parallel solver object. This object stores an RDD containing "local" data per partition, captured via a local solver object.
        The RDD also stores primal and dual variables associated with the arguments of this local solver function, as well as statistics reported by
        the last computation of the local solver. The class can be used as an interface to add "homogeneous" objectives in the consensus admm algorithm,
        that can be executed in parallel.

        NOTE(review): this module uses Python 2 tuple-parameter lambdas
        (e.g. ``lambda (key, value): ...``) and therefore only runs under
        Python 2 / the matching PySpark version.
    """
    def __init__(self,LocalSolverClass,data,initvalue,N,rho,silent=False,lean=False, RDD=None, D=None, lambda_linear=1.0, prePartFunc=None):
        """Class constructor. It takes as an argument a local solver class, data (of a form understandable by the local solver class), an initial value for the primal variables, and a boolean value; the latter can be used to suppress the evaluation of the objective.
        """
        self.SolverClass=LocalSolverClass
        # Build the per-partition (solver, P, Phi, stats) RDD unless a
        # pre-built one was handed in.
        if RDD==None:
            if D==None:
                if LocalSolverClass==LocalL1Solver or LocalSolverClass==LocalL2Solver:
                    self.PrimalDualRDD = LocalSolverClass.initializeLocalVariables(Sij=data,initvalue=initvalue,N=N,rho=rho, prePartFunc=prePartFunc).cache() #LocalSolver class should implement class method initializeLocalVariables
                else:
                    # Solvers other than L1/L2 take the D / lambda_linear signature.
                    self.PrimalDualRDD = LocalSolverClass.initializeLocalVariables(data,initvalue,N,rho,D,lambda_linear).cache()
            else:
                self.PrimalDualRDD = LocalSolverClass.initializeLocalVariables(data,initvalue,N,rho,D,lambda_linear).cache()
        else:
            self.PrimalDualRDD = RDD
        self.N = N
        self.silent=silent
        self.lean=lean
        # Map each variable key to the partition that owns it, so that Z
        # values can later be routed to the right partitions.
        self.varsToPartitions = self.PrimalDualRDD.flatMapValues( lambda (solver,P,Phi,stats) : P.keys()).map(swap).partitionBy(self.N).cache()
    def joinAndAdapt(self,ZRDD, alpha, rho,checkpoint = False, forceComp=False):
        """ Given a ZRDD, adapt the local primal and dual variables. The former are updated via the proximal operator, the latter via gradient ascent.

            Returns (old primal residual, old objective value) unless the
            solver is silent and forceComp is False, in which case None.
        """
        toUnpersist = self.PrimalDualRDD #Old RDD is to be uncached
        def adaptDual(solver, P, Phi, stats, Z, alpha):
            """Update the dual variables via gradient ascent on (P - Z)."""
            return ( solver, P, dict( [ (key,Phi[key]+alpha*(P[key]-Z[key])) for key in Phi ] ), Z)
        #Send z to the appropriate partitions
        ZtoPartitions = ZRDD.join(self.varsToPartitions,numPartitions=self.N).map(lambda (key,(z,splitIndex)): (splitIndex, (key,z))).partitionBy(self.N,partitionFunc=identityHash).groupByKey().mapValues(list).mapValues(dict)
        PrimalDualOldZ=self.PrimalDualRDD.join(ZtoPartitions,numPartitions=self.N)
        if not self.silent or forceComp:
            # Residual / objective are computed before the update, i.e.
            # they describe the previous iterate.
            oldPrimalResidual = np.sqrt(PrimalDualOldZ.values().map(lambda ((solver,P,Phi,stats),Z): sum( ( (P[key]-Z[key])**2 for key in Z) ) ).reduce(add))
            oldObjValue = PrimalDualOldZ.values().map(lambda ((solver,P,Phi,stats),Z): solver.evaluate(Z)).reduce(add) #local solver should implement evaluate
        PrimalNewDualOldZ = PrimalDualOldZ.mapValues(lambda ((solver,P,Phi,stats),Z): adaptDual(solver, P, Phi, stats, Z, alpha))
        ZbarAndNewDual = PrimalNewDualOldZ.mapValues(lambda (solver,P,Phi,Z): ( solver, dict( [(key, Z[key]-Phi[key]) for key in Z]), Phi ))
        self.PrimalDualRDD = ZbarAndNewDual.mapValues( lambda (solver,Zbar,Phi) : (solver,solver.solve(Zbar, rho),Phi)).mapValues(lambda (solver,(P,stats),Phi): (solver,P,Phi,stats)).cache() #Solver should implement solve
        #Maybe partitioning is not needed?
        if checkpoint:
            self.PrimalDualRDD.localCheckpoint()
        ##Unpersist commented for now because running time increases.
        #toUnpersist.unpersist()
        if not self.silent or forceComp:
            return (oldPrimalResidual,oldObjValue)
        else:
            return None
    def logstats(self):
        """ Return statistics from PrimalDualRDD. In particular, this returns the average, min, and maximum value of each statistic.
        """
        rdd = self.PrimalDualRDD
        statsonly =rdd.map(lambda (partitionid, (solver,P,Phi,stats)): stats).cache()
        #Checkpoint the RDD
        # if iteration!=0 and iteration % checkointing_freq == 0:
        #     statsonly.checkpoint()
        stats = statsonly.reduce(lambda x,y: mergedicts(x,y))
        minstats = statsonly.reduce(lambda x,y: mergedicts(x,y,min))
        maxstats = statsonly.reduce(lambda x,y: mergedicts(x,y,max))
        return " ".join([ key+"= %s (%s/%s)" % (str(1.0*stats[key]/self.N),str(minstats[key]),str(maxstats[key])) for key in stats])
    def getVars(self, rho):
        """Return the primal variables associated with this RDD. To be used to compute the new consensus variable"""
        return self.PrimalDualRDD.flatMap(lambda (partitionId,(solver,P,Phi,stats)): [ (key, ( rho*( P[key]+Phi[key]), rho)) for key in P ] )
    def computeDualResidual(self, ZRDDjoinedOldZRDD):
        '''Return the squared norm of the dual residual, which is computed as:
               S = A^TB(Z^(k+1)-Z^(k))
        '''
        ZRDDjoinedOldZRDD = ZRDDjoinedOldZRDD.mapValues(lambda (z, zOld): (z-zOld)**2)
        return np.sqrt( self.varsToPartitions.join(ZRDDjoinedOldZRDD).mapValues(lambda (splitID, deltaZ): deltaZ).values().reduce(add) )
class ParallelSolverPnorm(ParallelSolver):
    """This class is inherited from ParallelSolver, it updates P and Y variables for a general p-norm solver via inner ADMM."""
    def __init__(self,LocalSolverClass,data,initvalue,N,rho,rho_inner, p, silent=False,lean=False, RDD=None, debug=False, prePartFunc=None):
        """Class constructor. It takes as an argument a local solver class, data (of a form understandable by the local solver class), an initial value for the primal variables, and a boolean value; the latter can be used to suppress the evaluation of the objective.
        """
        self.SolverClass=LocalSolverClass
        if RDD==None:
            self.PrimalDualRDD = LocalSolverClass.initializeLocalVariables(Sij=data,initvalue=initvalue,N=N,rho=rho, rho_inner=rho_inner, prePartFunc=prePartFunc).cache() #LocalSolver class should implement class method initializeLocalVariables
        else:
            self.PrimalDualRDD = RDD
        self.N = N
        self.silent=silent
        self.lean=lean
        self.debug = debug #In debug mode keep track of the obj. val. and residuals
        self.rho_inner = rho_inner
        self.p = p
        # Per-partition tuples here are (solver, P, Y, Phi, Upsilon, stats).
        self.varsToPartitions = self.PrimalDualRDD.flatMapValues( lambda (solver,P,Y,Phi,Upsilon, stats) : P.keys()).map(swap).partitionBy(self.N).cache()
    def joinAndAdapt(self,ZRDD, alpha, rho, alpha_inner=1.0, maxiters = 100, residual_tol = 1.e-06, checkpoint = False, logger=None, forceComp=False):
        """Adapt P/Y primal variables and Phi/Upsilon duals via an inner ADMM loop.

        Returns the trace dict in debug mode, otherwise
        (old primal residual, old objective value) or None when silent.
        """
        rho_inner = self.rho_inner
        p_param = self.p
        #In debug mode keep track of the obj. val. and residuals
        if self.debug:
            trace = {}
        #Send z to the appropriate partitions
        def Fm(objs,P):
            """
            Compute the FPm functions, i.e., FPm = \sum_{ij\in S1} P[(i,j)]-\sum_{ij \in S2} P[(i,j)]
            """
            FPm = {}
            for edge in objs:
                (set1, set2) = objs[edge]
                tmp_val = 0.0
                for key in set1:
                    tmp_val += P[key]
                for key in set2:
                    tmp_val -= P[key]
                FPm[edge] = tmp_val
            return FPm
        ZtoPartitions = ZRDD.join(self.varsToPartitions,numPartitions=self.N).map(lambda (key,(z,splitIndex)): (splitIndex, (key,z))).partitionBy(self.N,partitionFunc=identityHash).groupByKey().mapValues(list).mapValues(dict)
        PrimalDualOldZ=self.PrimalDualRDD.join(ZtoPartitions,numPartitions=self.N)
        if not self.silent or forceComp:
            oldPrimalResidual = np.sqrt(PrimalDualOldZ.values().map(lambda ((solver,P,Y,Phi,Upsilon,stats),Z): sum( ( (P[key]-Z[key])**2 for key in Z) ) ).reduce(add))
            oldObjValue = (PrimalDualOldZ.values().map(lambda ((solver,P,Y,Phi,Upsilon,stats),Z): solver.evaluate(Z, p_param)).reduce(add))**(1./p_param) #local solver should compute p-norm to the power p.
        # Gradient-ascent update of the outer dual Phi.
        PrimalNewDualOldZ = PrimalDualOldZ.mapValues(lambda ((solver,P,Y,Phi,Upsilon,stats),Z): ( solver, P, Y,dict( [ (key,Phi[key]+alpha*(P[key]-Z[key])) for key in Phi ] ),Upsilon, stats, Z))
        ZbarPrimalDual = PrimalNewDualOldZ.mapValues(lambda (solver,P,Y,Phi,Upsilon,stats, Z): ( solver,P,Y,Phi,Upsilon,stats,dict( [(key, Z[key]-Phi[key]) for key in Z])))
        last = time()
        start_time = time()
        #Start the inner ADMM iterations
        for i in range(maxiters):
            #Compute vectors Fm(Pm)
            FmZbarPrimalDual = ZbarPrimalDual.mapValues(lambda (solver,P,Y,Phi,Upsilon,stats,Zbar):(solver, Fm(solver.objectives,P),P,Y,Phi,Upsilon,stats,Zbar))
            # In lean mode residuals/objective are only evaluated on the
            # last inner iteration.
            if not self.lean or (self.lean and i==maxiters-1):
                #Compute the residual
                OldinnerResidual = np.sqrt(FmZbarPrimalDual.values().flatMap(lambda (solver, FPm,OldP,Y,Phi,Upsilon,stats,Zbar): [(Y[key]-FPm[key])**2 for key in Y]).reduce(add) )
            ##ADMM steps
            #Adapt the dual variable Upsilon
            FmYNewUpsilonPPhi = FmZbarPrimalDual.mapValues(lambda (solver, FPm,OldP, Y,Phi,Upsilon,stats,Zbar): (solver, FPm, OldP, Y, Phi, dict( [(key,Upsilon[key]+alpha_inner*(Y[key]-FPm[key])) for key in Y]),stats,Zbar))
            #Update Y via prox. op. for p-norm
            NewYUpsilonPhi, Ynorm = pnormOp(FmYNewUpsilonPPhi.mapValues(lambda (solver, FPm, OldP, Y, Phi, Upsilon, stats, Zbar):(dict([(key,FPm[key]-Upsilon[key]) for key in Upsilon]), (solver, OldP, Y, Phi, Upsilon,stats,Zbar) ) ), p_param, rho_inner, 1.e-6, self.lean and i<maxiters-1 )
            NewYUpsilonPhi = NewYUpsilonPhi.mapValues(lambda (Y, (solver, OldP, OldY, Phi, Upsilon, stats, Zbar)): (solver, OldP, Y, OldY, Phi, Upsilon,stats, Zbar) )
            if not self.lean or (self.lean and i==maxiters-1):
                #Compute the dual residual for Y
                DualInnerResidual_Y = np.sqrt( NewYUpsilonPhi.values().flatMap(lambda (solver, OldP, Y, OldY, Phi, Upsilon,stats, Zbar): [ (Y[key] -OldY[key])**2 for key in Y]).reduce(add) )
            NewYUpsilonPhi = NewYUpsilonPhi.mapValues(lambda (solver, OldP, Y, OldY, Phi, Upsilon,stats, Zbar):(solver, OldP, Y, Phi, Upsilon,stats, Zbar) )
            #Update P via solving a least-square problem
            ZbarPrimalDual = NewYUpsilonPhi.mapValues(lambda (solver, OldP, Y, Phi,Upsilon,stats,Zbar): (solver,solver.solve(Y, Zbar, Upsilon, rho, rho_inner),OldP, Y, Phi, Upsilon, stats, Zbar)).mapValues(lambda (solver, (P, stats),OldP, Y, Phi, Upsilon, stats_old, Zbar): (solver,P,OldP,Y,Phi,Upsilon, stats, Zbar))
            if not self.lean or (self.lean and i==maxiters-1):
                #Compute the dual residual for P
                DualInnerResidual_P = np.sqrt( ZbarPrimalDual.values().flatMap(lambda (solver,P,OldP,Y,Phi,Upsilon, stats, Zbar): [ (P[key] -OldP[key])**2 for key in P]).reduce(add) )
                #Total dual residual
                DualInnerResidual = DualInnerResidual_P + DualInnerResidual_Y
            ZbarPrimalDual = ZbarPrimalDual.mapValues(lambda (solver,P,OldP,Y,Phi,Upsilon, stats, Zbar): (solver,P,Y,Phi,Upsilon, stats, Zbar))
            if not self.lean or (self.lean and i==maxiters-1):
                objval = ZbarPrimalDual.values().flatMap(lambda (solver,P,Y,Phi,Upsilon, stats, Zbar):[(P[key]-Zbar[key])**2 for key in P]).reduce(lambda x,y:x+y) + Ynorm
            now = time()
            if logger != None and ( not self.lean or (self.lean and i==maxiters-1) ):
                logger.info("Inner ADMM iteration %d, p-norm is %f, objective is %f, residual is %f, dual residual is %f, time is %f" %(i, Ynorm, objval, OldinnerResidual, DualInnerResidual, now-last))
            if (not self.lean or (self.lean and i==maxiters-1)) and self.debug:
                trace[i] = {}
                trace[i]['OBJ'] = objval
                trace[i]['PRES'] = OldinnerResidual
                trace[i]['DRES'] = DualInnerResidual
                trace[i]['IT_TIME'] = now-last
                trace[i]['TIME'] = now-start_time
            last = time()
            # Early stop when both inner residuals are small (non-lean only,
            # since the residuals are not computed each iteration in lean mode).
            if not self.lean and DualInnerResidual<residual_tol and OldinnerResidual<residual_tol:
                break
        self.PrimalDualRDD = ZbarPrimalDual.mapValues(lambda (solver,P,Y,Phi,Upsilon,stats, Zbar): (solver,P,Y,Phi,Upsilon,stats)).cache()
        #Checkpointing
        if checkpoint:
            self.PrimalDualRDD.localCheckpoint()
        if self.debug:
            return trace
        if not self.silent or forceComp:
            return (oldPrimalResidual,oldObjValue)
        else:
            return None
    def logstats(self):
        """ Return statistics from PrimalDualRDD. In particular, this returns the average, min, and maximum value of each statistic.
        """
        rdd = self.PrimalDualRDD
        statsonly =rdd.map(lambda (partitionid, (solver,P,Y,Phi,Upsilon,stats)): stats).cache()
        #Checkpoint the RDD
        # if iteration!=0 and iteration % checkointing_freq == 0:
        #     statsonly.checkpoint()
        stats = statsonly.reduce(lambda x,y: mergedicts(x,y))
        minstats = statsonly.reduce(lambda x,y: mergedicts(x,y,min))
        maxstats = statsonly.reduce(lambda x,y: mergedicts(x,y,max))
        return " ".join([ key+"= %s (%s/%s)" % (str(1.0*stats[key]/self.N),str(minstats[key]),str(maxstats[key])) for key in stats])
    def getVars(self, rho):
        """Return the primal variables (rho-weighted P + Phi) for the consensus step."""
        return self.PrimalDualRDD.flatMap(lambda (partitionId,(solver,P,Y,Phi,Upsilon,stats)): [ (key, ( rho*( P[key]+Phi[key]), rho)) for key in P ] )
class ParallelSolver1norm(ParallelSolverPnorm):
    """Specialization of ParallelSolverPnorm for the ell_1 norm (p = 1),
    using the L1 proximal operator in the inner ADMM loop."""
    def joinAndAdapt(self,ZRDD, alpha, rho,alpha_inner=1.0, maxiters = 100, residual_tol = 1.e-06, checkpoint = False, logger = None, forceComp=False):
        """Same structure as ParallelSolverPnorm.joinAndAdapt but with
        p fixed to 1 and Y updated via L1normOp instead of pnormOp."""
        rho_inner = self.rho_inner
        p_param = 1
        if self.debug:
            trace = {}
        #Send z to the appropriate partitions
        def Fm(objs,P):
            """
            Compute the FPm functions, i.e., FPm = \sum_{ij\in S1} P[(i,j)]-\sum_{ij \in S2} P[(i,j)]
            """
            FPm = {}
            for edge in objs:
                (set1, set2) = objs[edge]
                tmp_val = 0.0
                for key in set1:
                    tmp_val += P[key]
                for key in set2:
                    tmp_val -= P[key]
                FPm[edge] = tmp_val
            return FPm
        ZtoPartitions = ZRDD.join(self.varsToPartitions,numPartitions=self.N).map(lambda (key,(z,splitIndex)): (splitIndex, (key,z))).partitionBy(self.N,partitionFunc=identityHash).groupByKey().mapValues(list).mapValues(dict)
        PrimalDualOldZ=self.PrimalDualRDD.join(ZtoPartitions,numPartitions=self.N)
        if not self.silent or forceComp:
            oldPrimalResidual = np.sqrt(PrimalDualOldZ.values().map(lambda ((solver,P,Y,Phi,Upsilon,stats),Z): sum( ( (P[key]-Z[key])**2 for key in Z) ) ).reduce(add))
            oldObjValue = (PrimalDualOldZ.values().map(lambda ((solver,P,Y,Phi,Upsilon,stats),Z): solver.evaluate(Z, p_param)).reduce(add))**(1./p_param) #local solver should compute p-norm to the power p.
        PrimalNewDualOldZ = PrimalDualOldZ.mapValues(lambda ((solver,P,Y,Phi,Upsilon,stats),Z): ( solver, P, Y,dict( [ (key,Phi[key]+alpha*(P[key]-Z[key])) for key in Phi ] ),Upsilon, stats, Z))
        ZbarPrimalDual = PrimalNewDualOldZ.mapValues(lambda (solver,P,Y,Phi,Upsilon,stats, Z): ( solver,P,Y,Phi,Upsilon,stats,dict( [(key, Z[key]-Phi[key]) for key in Z])))
        #Initialization for Inner ADMM
        #initialize Upsilon to 0
        # ZbarPrimalDual = ZbarPrimalDual.mapValues(lambda (solver,P,Y,Phi,Upsilon,stats,Zbar):(solver, P, Y, Phi, dict([(key,0.0) for key in Upsilon]), stats, Zbar))
        #initialize P by solving
        # ZbarPrimalDual = ZbarPrimalDual.mapValues(lambda (solver,P,Y,Phi,Upsilon,stats,Zbar):(solver, solver.solve(Y, Zbar, Upsilon, rho, rho_inner), Y, Phi,Upsilon,stats,Zbar)).mapValues(lambda (solver, (P, stats0), Y, Phi,Upsilon,stats,Zbar): (solver,P,Y,Phi,Upsilon,stats,Zbar))
        last = time()
        start_time = last
        #Start the inner ADMM iterations
        for i in range(maxiters):
            #Compute vectors Fm(Pm)
            FmZbarPrimalDual = ZbarPrimalDual.mapValues(lambda (solver,P,Y,Phi,Upsilon,stats,Zbar):(solver, Fm(solver.objectives,P),P,Y,Phi,Upsilon,stats,Zbar))
            # In lean mode residuals/objective are only evaluated on the
            # last inner iteration.
            if not self.lean or (self.lean and i==maxiters-1):
                #Compute the residual
                OldinnerResidual = np.sqrt(FmZbarPrimalDual.values().flatMap(lambda (solver, FPm,OldP,Y,Phi,Upsilon,stats,Zbar): [(Y[key]-FPm[key])**2 for key in Y]).reduce(add) )
            ##ADMM steps
            #Adapt the dual variable Upsilon
            FmYNewUpsilonPPhi = FmZbarPrimalDual.mapValues(lambda (solver, FPm, OldP,Y,Phi,Upsilon,stats,Zbar): (solver, FPm,OldP, Y, Phi, dict( [(key,Upsilon[key]+alpha_inner*(Y[key]-FPm[key])) for key in Y]),stats,Zbar))
            #Update Y via prox. op. for ell_1 norm
            NewYUpsilonPhi, Ynorm = L1normOp(FmYNewUpsilonPPhi.mapValues(lambda (solver, FPm,OldP, Y, Phi, Upsilon, stats, Zbar):(dict([(key,FPm[key]-Upsilon[key]) for key in Upsilon]), (solver, OldP,Y, Phi, Upsilon,stats,Zbar) ) ), rho_inner , self.lean and i<maxiters-1)
            NewYUpsilonPhi = NewYUpsilonPhi.mapValues(lambda (Y, (solver, OldP, OldY, Phi, Upsilon, stats, Zbar)): (solver, OldP, Y, OldY, Phi, Upsilon,stats, Zbar) )
            if not self.lean or (self.lean and i==maxiters-1):
                #Compute the dual residual for Y
                DualInnerResidual_Y = np.sqrt( NewYUpsilonPhi.values().flatMap(lambda (solver, OldP, Y, OldY, Phi, Upsilon,stats, Zbar): [ (Y[key] -OldY[key])**2 for key in Y]).reduce(add) )
            NewYUpsilonPhi = NewYUpsilonPhi.mapValues(lambda (solver, OldP, Y, OldY, Phi, Upsilon,stats, Zbar):(solver, OldP, Y, Phi, Upsilon,stats, Zbar) )
            #Update P via solving a least-square problem
            ZbarPrimalDual = NewYUpsilonPhi.mapValues(lambda (solver, OldP, Y, Phi,Upsilon,stats,Zbar): (solver,solver.solve(Y, Zbar, Upsilon, rho, rho_inner), OldP, Y, Phi, Upsilon, stats, Zbar)).mapValues(lambda (solver, (P, stats), OldP, Y, Phi, Upsilon, stats_old, Zbar): (solver,OldP,P,Y,Phi,Upsilon, stats, Zbar))
            if not self.lean or (self.lean and i==maxiters-1):
                #Compute the dual residual for P
                DualInnerResidual_P = np.sqrt( ZbarPrimalDual.values().flatMap(lambda (solver,OldP,P,Y,Phi,Upsilon, stats, Zbar): [ (P[key] -OldP[key])**2 for key in P]).reduce(add) )
                #Total dual residual
                DualInnerResidual = DualInnerResidual_P + DualInnerResidual_Y
            ZbarPrimalDual = ZbarPrimalDual.mapValues(lambda (solver,OldP,P,Y,Phi,Upsilon, stats, Zbar): (solver,P,Y,Phi,Upsilon, stats, Zbar))
            if not self.lean or (self.lean and i==maxiters-1):
                objval = ZbarPrimalDual.values().flatMap(lambda (solver,P,Y,Phi,Upsilon, stats, Zbar):[(P[key]-Zbar[key])**2 for key in P]).reduce(lambda x,y:x+y) + Ynorm
            now = time()
            if logger != None and (not self.lean or (self.lean and i==maxiters-1)):
                logger.info("Inner ADMM iteration %d, p-norm is %f, objective is %f, residual is %f, dual residual is %f, iteration time is %f" %(i, Ynorm, objval, OldinnerResidual, DualInnerResidual, now-last))
            if (not self.lean or (self.lean and i==maxiters-1)) and self.debug:
                trace[i] = {}
                trace[i]['OBJ'] = objval
                trace[i]['PRES'] = OldinnerResidual
                trace[i]['DRES'] = DualInnerResidual
                trace[i]['IT_TIME'] = now-last
                trace[i]['TIME'] = now-start_time
            last = time()
            # Early stop when both inner residuals are small (non-lean only).
            if not self.lean and DualInnerResidual<residual_tol and OldinnerResidual<residual_tol:
                break
        self.PrimalDualRDD = ZbarPrimalDual.mapValues(lambda (solver,P,Y,Phi,Upsilon,stats, Zbar): (solver,P,Y,Phi,Upsilon,stats)).cache()
        #Checkpointing
        if checkpoint:
            self.PrimalDualRDD.localCheckpoint()
        if self.debug:
            return trace
        if not self.silent or forceComp:
            return (oldPrimalResidual,oldObjValue)
        else:
            return None
class ParallelSolver2norm(ParallelSolverPnorm):
    # ADMM solver specialized to the Euclidean (p = 2) norm.  The inner loop
    # mirrors ParallelSolverPnorm.joinAndAdapt, but the Y-update uses the
    # ell_2 proximal operator (EuclidiannormOp) and objectives are evaluated
    # with p_param = 2.
    def joinAndAdapt(self,ZRDD, alpha, rho, alpha_inner=1.0, maxiters = 100, residual_tol = 1.e-06, accelerated=False, checkpoint = False, logger = None, forceComp=False):
        """Run one outer ADMM adaptation followed by an inner ADMM loop.

        ZRDD holds the global variables z keyed like the local P variables;
        alpha and rho are the outer step size and penalty, alpha_inner and
        self.rho_inner the inner ones.  Returns the debug trace dict when
        self.debug is set, otherwise (old primal residual, old objective)
        when not silent (or forceComp is True), else None.
        """
        rho_inner = self.rho_inner
        p_param = 2
        if self.debug:
            trace = {}
        #Send z to the appropriate partitions
        def Fm(objs,P):
            """
            Compute the FPm functions, i.e., FPm = \sum_{ij\in S1} P[(i,j)]-\sum_{ij \in S2} P[(i,j)]
            """
            FPm = {}
            for edge in objs:
                (set1, set2) = objs[edge]
                tmp_val = 0.0
                for key in set1:
                    tmp_val += P[key]
                for key in set2:
                    tmp_val -= P[key]
                FPm[edge] = tmp_val
            return FPm
        # Route each z entry to the partition holding the matching local vars.
        ZtoPartitions = ZRDD.join(self.varsToPartitions,numPartitions=self.N).map(lambda (key,(z,splitIndex)): (splitIndex, (key,z))).partitionBy(self.N,partitionFunc=identityHash).groupByKey().mapValues(list).mapValues(dict)
        PrimalDualOldZ=self.PrimalDualRDD.join(ZtoPartitions,numPartitions=self.N)
        if not self.silent or forceComp:
            oldPrimalResidual = np.sqrt(PrimalDualOldZ.values().map(lambda ((solver,P,Y,Phi,Upsilon,stats),Z): sum( ( (P[key]-Z[key])**2 for key in Z) ) ).reduce(add))
            oldObjValue = (PrimalDualOldZ.values().map(lambda ((solver,P,Y,Phi,Upsilon,stats),Z): solver.evaluate(Z, p_param)).reduce(add))**(1./p_param) #local solver should compute p-norm to the power p.
        # Outer dual update: Phi += alpha * (P - Z).
        PrimalNewDualOldZ = PrimalDualOldZ.mapValues(lambda ((solver,P,Y,Phi,Upsilon,stats),Z): ( solver, P, Y,dict( [ (key,Phi[key]+alpha*(P[key]-Z[key])) for key in Phi ] ),Upsilon, stats, Z))
        # Zbar = Z - Phi is the fixed target the inner loop drives P towards.
        ZbarPrimalDual = PrimalNewDualOldZ.mapValues(lambda (solver,P,Y,Phi,Upsilon,stats, Z): ( solver,P,Y,Phi,Upsilon,stats,dict( [(key, Z[key]-Phi[key]) for key in Z])))
        last = time()
        start_time = last
        #Start the inner ADMM iterations
        if accelerated:
            #For accelerated ADMM, keep track of old dual variables Upsilon as well, plus add Upsilon hat. (see Alg. 2 in Accelerated Alternating Direction Method of Multipliers by Kadkhodaie et al.)
            ZbarPrimalDual = ZbarPrimalDual.mapValues(lambda (solver,P,Y,Phi,Upsilon,stats,Zbar):(solver,P,Y,Phi,Upsilon,Upsilon,Upsilon,stats,Zbar))
            ak = 1.
        for i in range(maxiters):
            #Compute vectors Fm(Pm)
            if accelerated:
                FmZbarPrimalDual = ZbarPrimalDual.mapValues(lambda (solver,P,Y,Phi,Upsilon,OldUpsilon,HatUpsilon, stats,Zbar):(solver, Fm(solver.objectives,P),P,Y,Phi,Upsilon, OldUpsilon, HatUpsilon, stats,Zbar))
            else:
                FmZbarPrimalDual = ZbarPrimalDual.mapValues(lambda (solver,P,Y,Phi,Upsilon,stats,Zbar):(solver, Fm(solver.objectives,P),P,Y,Phi,Upsilon,stats,Zbar))
            if not self.lean or (self.lean and i==maxiters-1):
                #Compute the residual
                # NOTE(review): this 8-field lambda does not match the
                # 10-field records produced on the accelerated path — the
                # accelerated branch looks unfinished (see "To Be Continued"
                # below); confirm before enabling accelerated=True.
                OldinnerResidual = np.sqrt(FmZbarPrimalDual.values().flatMap(lambda (solver, FPm,OldP,Y,Phi,Upsilon,stats,Zbar): [(Y[key]-FPm[key])**2 for key in Y]).reduce(add) )
            ##ADMM steps
            #Adapt the dual varible Upsilon
            if accelerated:
                # NOTE(review): all three mapValues below read from
                # FmZbarPrimalDual instead of chaining on FmYNewUpsilonPPhi,
                # so only the last assignment takes effect and the Upsilon
                # dual-ascent step is discarded.  Looks like a bug relative
                # to Alg. 2 of Kadkhodaie et al.; left unchanged here.
                #Replace OldUpsilon
                FmYNewUpsilonPPhi = FmZbarPrimalDual.mapValues(lambda (solver, FPm,OldP, Y,Phi,Upsilon, OldUpsilon, HatUpsilon,stats,Zbar): (solver, FPm,OldP, Y,Phi,Upsilon, Upsilon, HatUpsilon,stats,Zbar))
                #Update Upsilon
                FmYNewUpsilonPPhi = FmZbarPrimalDual.mapValues(lambda (solver, FPm,OldP, Y,Phi,Upsilon, OldUpsilon, HatUpsilon,stats,Zbar): (solver, FPm, OldP, Y, Phi, dict( [(key,HatUpsilon[key]+alpha_inner*(Y[key]-FPm[key])) for key in Y]), OldUpsilon, HatUpsilon, stats,Zbar))
                #Update HatUpsilon
                FmYNewUpsilonPPhi = FmZbarPrimalDual.mapValues(lambda (solver, FPm,OldP, Y,Phi,Upsilon, OldUpsilon, HatUpsilon,stats,Zbar): (solver, FPm,OldP, Y,Phi,Upsilon,OldUpsilon , dict( [(key, Upsilon[key] + (ak-1.)/(ak+1.)*(Upsilon[key]-OldUpsilon[key])) for key in Upsilon]), stats,Zbar))
                #Update ak
                ak = 0.5 * (1. + np.sqrt(1+4*ak**2))
            else:
                # Standard dual ascent: Upsilon += alpha_inner * (Y - Fm(P)).
                FmYNewUpsilonPPhi = FmZbarPrimalDual.mapValues(lambda (solver, FPm,OldP, Y,Phi,Upsilon,stats,Zbar): (solver, FPm, OldP, Y, Phi, dict( [(key,Upsilon[key]+alpha_inner*(Y[key]-FPm[key])) for key in Y]),stats,Zbar))
            #Update Y via prox. op. for ell_2 norm
            if accelerated:
                NewYUpsilonPhi, Ynorm = EuclidiannormOp(FmYNewUpsilonPPhi.mapValues(lambda (solver, FPm,OldP, Y, Phi, Upsilon, OldUpsilon, HatUpsilon, stats, Zbar):(dict([(key,FPm[key]-Upsilon[key]) for key in Upsilon]), (solver, OldP, Y, Phi, Upsilon, OldUpsilon, HatUpsilon, stats, Zbar) ) ), rho_inner, self.lean and i<maxiters-1)
                NewYUpsilonPhi = NewYUpsilonPhi.mapValues(lambda (Y, (solver, OldP, OldY, Phi, Upsilon, OldUpsilon, HatUpsilon, stats, Zbar)): (solver, OldP, Y, OldY, Phi, Upsilon, OldUpsilon, HatUpsilon, stats, Zbar) )
            else:
                NewYUpsilonPhi, Ynorm = EuclidiannormOp(FmYNewUpsilonPPhi.mapValues(lambda (solver, FPm,OldP, Y, Phi, Upsilon, stats, Zbar):(dict([(key,FPm[key]-Upsilon[key]) for key in Upsilon]), (solver, OldP, Y, Phi, Upsilon,stats,Zbar) ) ), rho_inner, self.lean and i<maxiters-1)
                NewYUpsilonPhi = NewYUpsilonPhi.mapValues(lambda (Y, (solver, OldP, OldY, Phi, Upsilon, stats, Zbar)): (solver, OldP, Y, OldY, Phi, Upsilon,stats, Zbar) )
            #To Be Continued
            # NOTE(review): from here on every lambda assumes the
            # non-accelerated record shape; accelerated mode would fail to
            # unpack the extra OldUpsilon/HatUpsilon fields — confirm.
            if not self.lean or (self.lean and i==maxiters-1):
                #Compute the dual residual for Y
                DualInnerResidual_Y = np.sqrt( NewYUpsilonPhi.values().flatMap(lambda (solver, OldP, Y, OldY, Phi, Upsilon,stats, Zbar): [ (Y[key] -OldY[key])**2 for key in Y]).reduce(add) )
            # Drop OldY now that the dual residual has been measured.
            NewYUpsilonPhi = NewYUpsilonPhi.mapValues(lambda (solver, OldP, Y, OldY, Phi, Upsilon,stats, Zbar):(solver, OldP, Y, Phi, Upsilon,stats, Zbar) )
            #Update P via solving a least-square problem
            ZbarPrimalDual = NewYUpsilonPhi.mapValues(lambda (solver, OldP, Y, Phi,Upsilon,stats,Zbar): (solver,solver.solve(Y, Zbar, Upsilon, rho, rho_inner),OldP, Y, Phi, Upsilon, stats, Zbar)).mapValues(lambda (solver, (P, stats), OldP, Y, Phi, Upsilon, stats_old, Zbar): (solver,OldP,P,Y,Phi,Upsilon, stats, Zbar))
            if not self.lean or (self.lean and i==maxiters-1):
                #Compute the dual residual for P
                DualInnerResidual_P = np.sqrt( ZbarPrimalDual.values().flatMap(lambda (solver,OldP,P,Y,Phi,Upsilon, stats, Zbar): [ (P[key] -OldP[key])**2 for key in P]).reduce(add) )
                #Total dual residual
                DualInnerResidual = DualInnerResidual_P + DualInnerResidual_Y
            # Drop OldP now that the dual residual has been measured.
            ZbarPrimalDual = ZbarPrimalDual.mapValues(lambda (solver,OldP,P,Y,Phi,Upsilon, stats, Zbar): (solver,P,Y,Phi,Upsilon, stats, Zbar))
            if not self.lean or (self.lean and i==maxiters-1):
                # Augmented objective: ||P - Zbar||^2 plus the ell_2 norm term.
                objval = ZbarPrimalDual.values().flatMap(lambda (solver,P,Y,Phi,Upsilon, stats, Zbar):[(P[key]-Zbar[key])**2 for key in P]).reduce(lambda x,y:x+y) + Ynorm
            now = time()
            if logger != None and (not self.lean or (self.lean and i==maxiters-1)):
                logger.info("Inner ADMM iteration %d, p-norm is %f, objective is %f, residual is %f, dual residual is %f, iteration time is %f" %(i, Ynorm, objval, OldinnerResidual, DualInnerResidual, now-last))
            if (not self.lean or (self.lean and i==maxiters-1)) and self.debug:
                trace[i] = {}
                trace[i]['OBJ'] = objval
                trace[i]['PRES'] = OldinnerResidual
                trace[i]['DRES'] = DualInnerResidual
                trace[i]['IT_TIME'] = now-last
                trace[i]['TIME'] = now-start_time
            last = time()
            # Early exit on convergence (disabled in lean mode where the
            # residuals are only computed on the final iteration).
            if not self.lean and DualInnerResidual<residual_tol and OldinnerResidual<residual_tol:
                break
        # Persist the adapted primal/dual state for the next outer iteration.
        self.PrimalDualRDD = ZbarPrimalDual.mapValues(lambda (solver,P,Y,Phi,Upsilon,stats, Zbar): (solver,P,Y,Phi,Upsilon,stats)).cache()
        #Checkpointing
        if checkpoint:
            self.PrimalDualRDD.localCheckpoint()
        if self.debug:
            return trace
        if not self.silent or forceComp:
            return (oldPrimalResidual,oldObjValue)
        else:
            return None
| 64.046908
| 336
| 0.634396
| 3,892
| 30,038
| 4.868962
| 0.082734
| 0.053826
| 0.07124
| 0.063325
| 0.858839
| 0.851662
| 0.841055
| 0.83372
| 0.810396
| 0.801372
| 0
| 0.005629
| 0.237033
| 30,038
| 468
| 337
| 64.183761
| 0.821232
| 0.088954
| 0
| 0.778157
| 0
| 0.010239
| 0.016792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.044369
| null | null | 0.003413
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7aebe7b18d3ca35925bee6a0877f4de269856a61
| 19,234
|
py
|
Python
|
sdk/python/pulumi_gcp/compute/router_nat.py
|
dimpu47/pulumi-gcp
|
38355de300a5768e11c49d344a8165ba0735deed
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/compute/router_nat.py
|
dimpu47/pulumi-gcp
|
38355de300a5768e11c49d344a8165ba0735deed
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/compute/router_nat.py
|
dimpu47/pulumi-gcp
|
38355de300a5768e11c49d344a8165ba0735deed
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['RouterNat']
class RouterNat(pulumi.CustomResource):
    """A NAT service created in a Google Cloud Router (generated resource class)."""
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen); keep edits to
    # comments/docs only so regeneration does not clobber hand changes.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 drain_nat_ips: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
                 icmp_idle_timeout_sec: Optional[pulumi.Input[float]] = None,
                 log_config: Optional[pulumi.Input[pulumi.InputType['RouterNatLogConfigArgs']]] = None,
                 min_ports_per_vm: Optional[pulumi.Input[float]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 nat_ip_allocate_option: Optional[pulumi.Input[str]] = None,
                 nat_ips: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 router: Optional[pulumi.Input[str]] = None,
                 source_subnetwork_ip_ranges_to_nat: Optional[pulumi.Input[str]] = None,
                 subnetworks: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RouterNatSubnetworkArgs']]]]] = None,
                 tcp_established_idle_timeout_sec: Optional[pulumi.Input[float]] = None,
                 tcp_transitory_idle_timeout_sec: Optional[pulumi.Input[float]] = None,
                 udp_idle_timeout_sec: Optional[pulumi.Input[float]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        A NAT service created in a router.

        To get more information about RouterNat, see:

        * [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/routers)
        * How-to Guides
            * [Google Cloud Router](https://cloud.google.com/router/docs/)

        ## Example Usage

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[List[pulumi.Input[str]]] drain_nat_ips: A list of URLs of the IP resources to be drained. These IPs must be
               valid static external IPs that have been assigned to the NAT.
        :param pulumi.Input[float] icmp_idle_timeout_sec: Timeout (in seconds) for ICMP connections. Defaults to 30s if not set.
        :param pulumi.Input[pulumi.InputType['RouterNatLogConfigArgs']] log_config: Configuration for logging on NAT
               Structure is documented below.
        :param pulumi.Input[float] min_ports_per_vm: Minimum number of ports allocated to a VM from this NAT.
        :param pulumi.Input[str] name: Self-link of subnetwork to NAT
        :param pulumi.Input[str] nat_ip_allocate_option: How external IPs should be allocated for this NAT. Valid values are
               `AUTO_ONLY` for only allowing NAT IPs allocated by Google Cloud
               Platform, or `MANUAL_ONLY` for only user-allocated NAT IP addresses.
               Possible values are `MANUAL_ONLY` and `AUTO_ONLY`.
        :param pulumi.Input[List[pulumi.Input[str]]] nat_ips: Self-links of NAT IPs. Only valid if natIpAllocateOption
               is set to MANUAL_ONLY.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[str] region: Region where the router and NAT reside.
        :param pulumi.Input[str] router: The name of the Cloud Router in which this NAT will be configured.
        :param pulumi.Input[str] source_subnetwork_ip_ranges_to_nat: How NAT should be configured per Subnetwork.
               If `ALL_SUBNETWORKS_ALL_IP_RANGES`, all of the
               IP ranges in every Subnetwork are allowed to Nat.
               If `ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES`, all of the primary IP
               ranges in every Subnetwork are allowed to Nat.
               `LIST_OF_SUBNETWORKS`: A list of Subnetworks are allowed to Nat
               (specified in the field subnetwork below). Note that if this field
               contains ALL_SUBNETWORKS_ALL_IP_RANGES or
               ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any
               other RouterNat section in any Router for this network in this region.
               Possible values are `ALL_SUBNETWORKS_ALL_IP_RANGES`, `ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES`, and `LIST_OF_SUBNETWORKS`.
        :param pulumi.Input[List[pulumi.Input[pulumi.InputType['RouterNatSubnetworkArgs']]]] subnetworks: One or more subnetwork NAT configurations. Only used if
               `source_subnetwork_ip_ranges_to_nat` is set to `LIST_OF_SUBNETWORKS`
               Structure is documented below.
        :param pulumi.Input[float] tcp_established_idle_timeout_sec: Timeout (in seconds) for TCP established connections.
               Defaults to 1200s if not set.
        :param pulumi.Input[float] tcp_transitory_idle_timeout_sec: Timeout (in seconds) for TCP transitory connections.
               Defaults to 30s if not set.
        :param pulumi.Input[float] udp_idle_timeout_sec: Timeout (in seconds) for UDP connections. Defaults to 30s if not set.
        """
        # Legacy compatibility shims for the deprecated __name__/__opts__ args.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt existing resource"; props are only built
        # when creating a new one.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['drain_nat_ips'] = drain_nat_ips
            __props__['icmp_idle_timeout_sec'] = icmp_idle_timeout_sec
            __props__['log_config'] = log_config
            __props__['min_ports_per_vm'] = min_ports_per_vm
            __props__['name'] = name
            if nat_ip_allocate_option is None:
                raise TypeError("Missing required property 'nat_ip_allocate_option'")
            __props__['nat_ip_allocate_option'] = nat_ip_allocate_option
            __props__['nat_ips'] = nat_ips
            __props__['project'] = project
            __props__['region'] = region
            if router is None:
                raise TypeError("Missing required property 'router'")
            __props__['router'] = router
            if source_subnetwork_ip_ranges_to_nat is None:
                raise TypeError("Missing required property 'source_subnetwork_ip_ranges_to_nat'")
            __props__['source_subnetwork_ip_ranges_to_nat'] = source_subnetwork_ip_ranges_to_nat
            __props__['subnetworks'] = subnetworks
            __props__['tcp_established_idle_timeout_sec'] = tcp_established_idle_timeout_sec
            __props__['tcp_transitory_idle_timeout_sec'] = tcp_transitory_idle_timeout_sec
            __props__['udp_idle_timeout_sec'] = udp_idle_timeout_sec
        super(RouterNat, __self__).__init__(
            'gcp:compute/routerNat:RouterNat',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            drain_nat_ips: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
            icmp_idle_timeout_sec: Optional[pulumi.Input[float]] = None,
            log_config: Optional[pulumi.Input[pulumi.InputType['RouterNatLogConfigArgs']]] = None,
            min_ports_per_vm: Optional[pulumi.Input[float]] = None,
            name: Optional[pulumi.Input[str]] = None,
            nat_ip_allocate_option: Optional[pulumi.Input[str]] = None,
            nat_ips: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
            project: Optional[pulumi.Input[str]] = None,
            region: Optional[pulumi.Input[str]] = None,
            router: Optional[pulumi.Input[str]] = None,
            source_subnetwork_ip_ranges_to_nat: Optional[pulumi.Input[str]] = None,
            subnetworks: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RouterNatSubnetworkArgs']]]]] = None,
            tcp_established_idle_timeout_sec: Optional[pulumi.Input[float]] = None,
            tcp_transitory_idle_timeout_sec: Optional[pulumi.Input[float]] = None,
            udp_idle_timeout_sec: Optional[pulumi.Input[float]] = None) -> 'RouterNat':
        """
        Get an existing RouterNat resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[List[pulumi.Input[str]]] drain_nat_ips: A list of URLs of the IP resources to be drained. These IPs must be
               valid static external IPs that have been assigned to the NAT.
        :param pulumi.Input[float] icmp_idle_timeout_sec: Timeout (in seconds) for ICMP connections. Defaults to 30s if not set.
        :param pulumi.Input[pulumi.InputType['RouterNatLogConfigArgs']] log_config: Configuration for logging on NAT
               Structure is documented below.
        :param pulumi.Input[float] min_ports_per_vm: Minimum number of ports allocated to a VM from this NAT.
        :param pulumi.Input[str] name: Self-link of subnetwork to NAT
        :param pulumi.Input[str] nat_ip_allocate_option: How external IPs should be allocated for this NAT. Valid values are
               `AUTO_ONLY` for only allowing NAT IPs allocated by Google Cloud
               Platform, or `MANUAL_ONLY` for only user-allocated NAT IP addresses.
               Possible values are `MANUAL_ONLY` and `AUTO_ONLY`.
        :param pulumi.Input[List[pulumi.Input[str]]] nat_ips: Self-links of NAT IPs. Only valid if natIpAllocateOption
               is set to MANUAL_ONLY.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[str] region: Region where the router and NAT reside.
        :param pulumi.Input[str] router: The name of the Cloud Router in which this NAT will be configured.
        :param pulumi.Input[str] source_subnetwork_ip_ranges_to_nat: How NAT should be configured per Subnetwork.
               If `ALL_SUBNETWORKS_ALL_IP_RANGES`, all of the
               IP ranges in every Subnetwork are allowed to Nat.
               If `ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES`, all of the primary IP
               ranges in every Subnetwork are allowed to Nat.
               `LIST_OF_SUBNETWORKS`: A list of Subnetworks are allowed to Nat
               (specified in the field subnetwork below). Note that if this field
               contains ALL_SUBNETWORKS_ALL_IP_RANGES or
               ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any
               other RouterNat section in any Router for this network in this region.
               Possible values are `ALL_SUBNETWORKS_ALL_IP_RANGES`, `ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES`, and `LIST_OF_SUBNETWORKS`.
        :param pulumi.Input[List[pulumi.Input[pulumi.InputType['RouterNatSubnetworkArgs']]]] subnetworks: One or more subnetwork NAT configurations. Only used if
               `source_subnetwork_ip_ranges_to_nat` is set to `LIST_OF_SUBNETWORKS`
               Structure is documented below.
        :param pulumi.Input[float] tcp_established_idle_timeout_sec: Timeout (in seconds) for TCP established connections.
               Defaults to 1200s if not set.
        :param pulumi.Input[float] tcp_transitory_idle_timeout_sec: Timeout (in seconds) for TCP transitory connections.
               Defaults to 30s if not set.
        :param pulumi.Input[float] udp_idle_timeout_sec: Timeout (in seconds) for UDP connections. Defaults to 30s if not set.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        __props__["drain_nat_ips"] = drain_nat_ips
        __props__["icmp_idle_timeout_sec"] = icmp_idle_timeout_sec
        __props__["log_config"] = log_config
        __props__["min_ports_per_vm"] = min_ports_per_vm
        __props__["name"] = name
        __props__["nat_ip_allocate_option"] = nat_ip_allocate_option
        __props__["nat_ips"] = nat_ips
        __props__["project"] = project
        __props__["region"] = region
        __props__["router"] = router
        __props__["source_subnetwork_ip_ranges_to_nat"] = source_subnetwork_ip_ranges_to_nat
        __props__["subnetworks"] = subnetworks
        __props__["tcp_established_idle_timeout_sec"] = tcp_established_idle_timeout_sec
        __props__["tcp_transitory_idle_timeout_sec"] = tcp_transitory_idle_timeout_sec
        __props__["udp_idle_timeout_sec"] = udp_idle_timeout_sec
        return RouterNat(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="drainNatIps")
    def drain_nat_ips(self) -> pulumi.Output[Optional[List[str]]]:
        """
        A list of URLs of the IP resources to be drained. These IPs must be
        valid static external IPs that have been assigned to the NAT.
        """
        return pulumi.get(self, "drain_nat_ips")

    @property
    @pulumi.getter(name="icmpIdleTimeoutSec")
    def icmp_idle_timeout_sec(self) -> pulumi.Output[Optional[float]]:
        """
        Timeout (in seconds) for ICMP connections. Defaults to 30s if not set.
        """
        return pulumi.get(self, "icmp_idle_timeout_sec")

    @property
    @pulumi.getter(name="logConfig")
    def log_config(self) -> pulumi.Output[Optional['outputs.RouterNatLogConfig']]:
        """
        Configuration for logging on NAT
        Structure is documented below.
        """
        return pulumi.get(self, "log_config")

    @property
    @pulumi.getter(name="minPortsPerVm")
    def min_ports_per_vm(self) -> pulumi.Output[Optional[float]]:
        """
        Minimum number of ports allocated to a VM from this NAT.
        """
        return pulumi.get(self, "min_ports_per_vm")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Self-link of subnetwork to NAT
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="natIpAllocateOption")
    def nat_ip_allocate_option(self) -> pulumi.Output[str]:
        """
        How external IPs should be allocated for this NAT. Valid values are
        `AUTO_ONLY` for only allowing NAT IPs allocated by Google Cloud
        Platform, or `MANUAL_ONLY` for only user-allocated NAT IP addresses.
        Possible values are `MANUAL_ONLY` and `AUTO_ONLY`.
        """
        return pulumi.get(self, "nat_ip_allocate_option")

    @property
    @pulumi.getter(name="natIps")
    def nat_ips(self) -> pulumi.Output[Optional[List[str]]]:
        """
        Self-links of NAT IPs. Only valid if natIpAllocateOption
        is set to MANUAL_ONLY.
        """
        return pulumi.get(self, "nat_ips")

    @property
    @pulumi.getter
    def project(self) -> pulumi.Output[str]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")

    @property
    @pulumi.getter
    def region(self) -> pulumi.Output[str]:
        """
        Region where the router and NAT reside.
        """
        return pulumi.get(self, "region")

    @property
    @pulumi.getter
    def router(self) -> pulumi.Output[str]:
        """
        The name of the Cloud Router in which this NAT will be configured.
        """
        return pulumi.get(self, "router")

    @property
    @pulumi.getter(name="sourceSubnetworkIpRangesToNat")
    def source_subnetwork_ip_ranges_to_nat(self) -> pulumi.Output[str]:
        """
        How NAT should be configured per Subnetwork.
        If `ALL_SUBNETWORKS_ALL_IP_RANGES`, all of the
        IP ranges in every Subnetwork are allowed to Nat.
        If `ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES`, all of the primary IP
        ranges in every Subnetwork are allowed to Nat.
        `LIST_OF_SUBNETWORKS`: A list of Subnetworks are allowed to Nat
        (specified in the field subnetwork below). Note that if this field
        contains ALL_SUBNETWORKS_ALL_IP_RANGES or
        ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any
        other RouterNat section in any Router for this network in this region.
        Possible values are `ALL_SUBNETWORKS_ALL_IP_RANGES`, `ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES`, and `LIST_OF_SUBNETWORKS`.
        """
        return pulumi.get(self, "source_subnetwork_ip_ranges_to_nat")

    @property
    @pulumi.getter
    def subnetworks(self) -> pulumi.Output[Optional[List['outputs.RouterNatSubnetwork']]]:
        """
        One or more subnetwork NAT configurations. Only used if
        `source_subnetwork_ip_ranges_to_nat` is set to `LIST_OF_SUBNETWORKS`
        Structure is documented below.
        """
        return pulumi.get(self, "subnetworks")

    @property
    @pulumi.getter(name="tcpEstablishedIdleTimeoutSec")
    def tcp_established_idle_timeout_sec(self) -> pulumi.Output[Optional[float]]:
        """
        Timeout (in seconds) for TCP established connections.
        Defaults to 1200s if not set.
        """
        return pulumi.get(self, "tcp_established_idle_timeout_sec")

    @property
    @pulumi.getter(name="tcpTransitoryIdleTimeoutSec")
    def tcp_transitory_idle_timeout_sec(self) -> pulumi.Output[Optional[float]]:
        """
        Timeout (in seconds) for TCP transitory connections.
        Defaults to 30s if not set.
        """
        return pulumi.get(self, "tcp_transitory_idle_timeout_sec")

    @property
    @pulumi.getter(name="udpIdleTimeoutSec")
    def udp_idle_timeout_sec(self) -> pulumi.Output[Optional[float]]:
        """
        Timeout (in seconds) for UDP connections. Defaults to 30s if not set.
        """
        return pulumi.get(self, "udp_idle_timeout_sec")

    # Map provider camelCase property names to Python snake_case and back.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 52.986226
| 161
| 0.671155
| 2,442
| 19,234
| 5.013923
| 0.099509
| 0.066482
| 0.045737
| 0.023522
| 0.803822
| 0.772052
| 0.756534
| 0.731869
| 0.712431
| 0.70802
| 0
| 0.002211
| 0.247634
| 19,234
| 362
| 162
| 53.132597
| 0.843895
| 0.453052
| 0
| 0.300578
| 1
| 0
| 0.160121
| 0.079415
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109827
| false
| 0.00578
| 0.040462
| 0.011561
| 0.260116
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7af1fd0fce2287d73c91c00361146b409c776db4
| 10,890
|
py
|
Python
|
cogs/Punish.py
|
Aspen-The-Deer/Guardian-Deer
|
53cf4a135b033df11082ee63bf59a359a0f6d362
|
[
"MIT"
] | null | null | null |
cogs/Punish.py
|
Aspen-The-Deer/Guardian-Deer
|
53cf4a135b033df11082ee63bf59a359a0f6d362
|
[
"MIT"
] | null | null | null |
cogs/Punish.py
|
Aspen-The-Deer/Guardian-Deer
|
53cf4a135b033df11082ee63bf59a359a0f6d362
|
[
"MIT"
] | null | null | null |
import asyncio
import datetime
import json
import os
import random
import sys
import time

import discord
from discord.ext import commands
class Punishments(commands.Cog):
    """Moderation cog: ban, kick and unban commands with embed feedback.

    Each command replies with a confirmation embed in the invoking channel,
    mirrors it to a channel named ``logs`` when one exists, and DMs the
    affected user.
    """

    # Bot avatar shown in the author header of every embed this cog builds.
    AVATAR_URL = "https://cdn.discordapp.com/avatars/606855758612660327/98b13ab2d31342848754caa909a653da.png?size=1024"

    def __init__(self, client):
        self.client = client

    def _brand(self, embed):
        """Apply the shared author header and footer to *embed* and return it."""
        embed.set_author(name="Guardian Deer", icon_url=self.AVATAR_URL)
        embed.set_footer(text="Type '>help' for help options!")
        return embed

    async def _insufficient_perms(self, ctx):
        """Reply with the standard missing-permissions embed (Error #001)."""
        embed3 = self._brand(discord.Embed(
            colour=(0x629632),
            title="Insufficient Permissions..."
        ))
        embed3.add_field(name="You are missing the permissions required to use this command.", value="Error: #001", inline=False)
        await ctx.send(embed=embed3)

    @commands.Cog.listener()
    async def on_ready(self):
        # Bug fix: time.sleep() blocks the whole event loop; use the
        # asyncio-friendly sleep instead.
        await asyncio.sleep(0.2)
        print('Punish.py')

    @commands.command(aliases=["Ban", "b", "B"])
    @commands.has_permissions(ban_members=True)
    async def ban(self, ctx, member: discord.User = None, reason: str = None):
        """Ban *member* for *reason*, announce it, log it and DM the user.

        Refuses to act when no member is given or the invoker targets
        themselves.  ``reason`` defaults to "No Reason Given".
        """
        logger = discord.utils.get(ctx.guild.channels, name='logs')
        server = ctx.message.guild
        mod = ctx.message.author.mention
        # DM embed, built before the default-reason substitution (preserves
        # the original behavior of showing the raw reason to the user).
        embed2 = self._brand(discord.Embed(
            colour=(0x629632),
            title="You have been Banned:"
        ))
        embed2.add_field(name="You have been banned from:", value=str(server), inline=False)
        embed2.add_field(name="For the Reason:", value=str(reason), inline=False)
        embed2.add_field(name="You were Banned By:", value=str(mod), inline=False)
        if member is None or member == ctx.message.author:
            embed = self._brand(discord.Embed(
                colour=(0x629632),
                title="User Cannot Banned:"
            ))
            embed.add_field(name="User:", value=str(member), inline=False)
            embed.add_field(name="This happened because:", value="You cannot ban yourself.\nNo user was specified to ban.", inline=False)
            await ctx.channel.send(embed=embed)
            return
        if reason is None:
            reason = "No Reason Given"
        await ctx.guild.ban(member, reason=reason)
        embed = self._brand(discord.Embed(
            colour=(0x629632),
            title="User Banned:"
        ))
        embed.add_field(name="User:", value=str(member), inline=False)
        embed.add_field(name="Reason:", value=str(reason), inline=False)
        embed.add_field(name="Banned By:", value=str(mod), inline=False)
        await ctx.channel.send(embed=embed)
        try:
            await logger.send(embed=embed)
            await member.send(embed=embed2)
        except AttributeError:
            # logger is None when no #logs channel exists.  Bug fix: the
            # original concatenated the Guild object to a str (TypeError);
            # cast explicitly.
            print("No logging channel found in " + str(server) + ", Ignoring Event.")
            await member.send(embed=embed2)

    @ban.error
    async def ban_error(self, ctx, error):
        """Handle errors raised by the ban command."""
        if isinstance(error, commands.MissingPermissions):
            await self._insufficient_perms(ctx)
            return
        else:
            print(error)

    @commands.command(aliases=["Kick", "k", "K"])
    @commands.has_permissions(kick_members=True)
    async def kick(self, ctx, member: discord.User = None, reason: str = None):
        """Kick *member* for *reason*, announce it, log it and DM the user."""
        logger = discord.utils.get(ctx.guild.channels, name='logs')
        server = ctx.message.guild
        mod = ctx.message.author.mention
        embed2 = self._brand(discord.Embed(
            colour=(0x629632),
            title="You have been Kicked:"
        ))
        embed2.add_field(name="You have been Kicked from:", value=str(server), inline=False)
        embed2.add_field(name="For the Reason:", value=reason, inline=False)
        embed2.add_field(name="You were kicked By:", value=str(mod), inline=False)
        if member is None or member == ctx.message.author:
            embed = self._brand(discord.Embed(
                colour=(0x629632),
                title="User Cannot Kicked:"
            ))
            embed.add_field(name="User:", value=str(member), inline=False)
            embed.add_field(name="This happened because:", value="You cannot kick yourself.\nNo user was specified to kick.", inline=False)
            await ctx.channel.send(embed=embed)
            # Bug fix: the original fell through here and later tried to DM a
            # None member, raising an uncaught AttributeError.
            return
        if reason is None:
            reason = "No Reason Given"
        await ctx.guild.kick(member, reason=reason)
        embed = self._brand(discord.Embed(
            colour=(0x629632),
            title="User Kicked:"
        ))
        embed.add_field(name="User:", value=str(member), inline=False)
        embed.add_field(name="Reason:", value=str(reason), inline=False)
        embed.add_field(name="Kicked By:", value=str(mod), inline=False)
        await ctx.channel.send(embed=embed)
        # Bug fix: the original rebuilt an identical embed2 and DMed the user
        # here *and* again below, sending a duplicate DM; the shared block
        # below now handles the single DM for both paths.
        try:
            await logger.send(embed=embed)
            await member.send(embed=embed2)
        except AttributeError:
            print("No logging channel found in " + str(server) + ", Ignoring Event.")
            await member.send(embed=embed2)

    @kick.error
    async def kick_error(self, ctx, error):
        """Handle errors raised by the kick command."""
        if isinstance(error, commands.MissingPermissions):
            await self._insufficient_perms(ctx)
            return
        else:
            print(error)

    @commands.command(aliases=["Unban", "u", "U"])
    @commands.has_permissions(ban_members=True)
    @commands.guild_only()
    async def unban(self, ctx, *, userId):
        """Lift the ban on the user whose id is *userId* and log the pardon."""
        logger = discord.utils.get(ctx.guild.channels, name='logs')
        mod = ctx.message.author.mention
        server = ctx.message.guild
        user = discord.Object(id=userId)
        await ctx.guild.unban(user)
        embed = self._brand(discord.Embed(
            colour=(0x629632)
        ))
        embed.add_field(name="User Pardoned by:", value=str(mod), inline=False)
        await ctx.channel.send(embed=embed)
        try:
            await logger.send(embed=embed)
        except AttributeError:
            # Bug fix: cast Guild to str before concatenation (was TypeError).
            print("No logging channel found in " + str(server) + ", Ignoring Event.")
            return

    @unban.error
    async def unban_error(self, ctx, error):
        """Handle errors raised by the unban command."""
        if isinstance(error, commands.MissingPermissions):
            await self._insufficient_perms(ctx)
            return
        else:
            print(error)
def setup(client):
client.add_cog(Punishments(client))
| 46.340426
| 164
| 0.628926
| 1,266
| 10,890
| 5.343602
| 0.110585
| 0.034294
| 0.051441
| 0.04272
| 0.912934
| 0.901404
| 0.879675
| 0.878788
| 0.878788
| 0.865188
| 0
| 0.086829
| 0.247016
| 10,890
| 235
| 165
| 46.340426
| 0.738171
| 0
| 0
| 0.719212
| 0
| 0
| 0.271784
| 0
| 0
| 0
| 0.009549
| 0
| 0
| 1
| 0.009852
| false
| 0
| 0.039409
| 0
| 0.078818
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bb27099e35560831799e88b7b41511cc784afc17
| 3,122
|
py
|
Python
|
POM/user.py
|
Mikhail-QA/IU
|
ec792502b0a453b410e6c59f38a42b541564a776
|
[
"Apache-2.0"
] | null | null | null |
POM/user.py
|
Mikhail-QA/IU
|
ec792502b0a453b410e6c59f38a42b541564a776
|
[
"Apache-2.0"
] | null | null | null |
POM/user.py
|
Mikhail-QA/IU
|
ec792502b0a453b410e6c59f38a42b541564a776
|
[
"Apache-2.0"
] | null | null | null |
# authorization user not abonement
class AutopaymentMailRu(object):
def __init__(self, driver):
self.driver = driver
def enter_email(self, user_name="autopayment@mail.ru"):
self.driver.find_element_by_xpath("//div/label[1]/input").send_keys(user_name)
def enter_password(self, password="123456"):
self.driver.find_element_by_xpath("//div/label[2]/input").send_keys(password)
# registration user buy subscription
class PaymentnotMailRu(object):
def __init__(self, driver):
self.driver = driver
def reg_enter_email(self, user_name="payment.not@mail.ru"):
self.driver.find_element_by_xpath("//div/label[1]/input").send_keys(user_name)
def reg_enter_password(self, password="123456"):
self.driver.find_element_by_xpath("//div/label[2]/input").send_keys(password)
# registration user with abonement
class PaymNotYandexRu(object):
def __init__(self, driver):
self.driver = driver
def reg_enter_email(self, user_name="paym.not@yandex.ru"):
self.driver.find_element_by_xpath("//div/label[1]/input").send_keys(user_name)
def reg_enter_password(self, password="123456"):
self.driver.find_element_by_xpath("//div/label[2]/input").send_keys(password)
def enter_email(self, user_name="paym.not@yandex.ru"):
self.driver.find_element_by_xpath("//div/label[1]/input").send_keys(user_name)
def enter_password(self, password="123456"):
self.driver.find_element_by_xpath("//div/label[2]/input").send_keys(password)
# registration user with abonement
class VratchGlavYandexRu(object):
def __init__(self, driver):
self.driver = driver
def reg_enter_email(self, user_name="vratch.glav@yandex.ru"):
self.driver.find_element_by_xpath("//div/label[1]/input").send_keys(user_name)
def reg_enter_password(self, password="123456"):
self.driver.find_element_by_xpath("//div/label[2]/input").send_keys(password)
def enter_email(self, user_name="vratch.glav@yandex.ru"):
self.driver.find_element_by_xpath("//div/label[1]/input").send_keys(user_name)
def enter_password(self, password="123456"):
self.driver.find_element_by_xpath("//div/label[2]/input").send_keys(password)
# authorization admin user
class Admin(object):
def __init__(self, driver):
self.driver = driver
def enter_email(self, user_name):
self.driver.find_element_by_name("user[email]").clear()
self.driver.find_element_by_name("user[email]").send_keys("%s" % user_name)
def enter_password(self, password):
self.driver.find_element_by_id("user_password").clear()
self.driver.find_element_by_id("user_password").send_keys("%s" % password)
class IuUseryopmail(object):
def __init__(self, driver):
self.driver = driver
def reg_enter_email(self, user_name="iuuser@yopmail.com"):
self.driver.find_element_by_xpath("//div/label[1]/input").send_keys(user_name)
def reg_enter_password(self, password="123456"):
self.driver.find_element_by_xpath("//div/label[2]/input").send_keys(password)
| 36.729412
| 86
| 0.714606
| 440
| 3,122
| 4.761364
| 0.115909
| 0.143198
| 0.120286
| 0.18043
| 0.888783
| 0.888783
| 0.88401
| 0.866826
| 0.797136
| 0.797136
| 0
| 0.02095
| 0.143818
| 3,122
| 84
| 87
| 37.166667
| 0.762813
| 0.050609
| 0
| 0.634615
| 0
| 0
| 0.171738
| 0.014199
| 0
| 0
| 0
| 0
| 0
| 1
| 0.423077
| false
| 0.326923
| 0
| 0
| 0.538462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 10
|
2475acbb21143beca1d0740977818f72c72f50fa
| 6,127
|
py
|
Python
|
aioamqp/tests/test_exchange.py
|
tkukushkin/aioamqp
|
b4f01209794122a4ec3b5d8d437cb5739641fb3e
|
[
"BSD-3-Clause"
] | 284
|
2015-01-08T20:05:07.000Z
|
2022-03-28T10:07:31.000Z
|
aioamqp/tests/test_exchange.py
|
tkukushkin/aioamqp
|
b4f01209794122a4ec3b5d8d437cb5739641fb3e
|
[
"BSD-3-Clause"
] | 179
|
2015-02-16T09:27:53.000Z
|
2022-03-30T16:01:52.000Z
|
aioamqp/tests/test_exchange.py
|
tkukushkin/aioamqp
|
b4f01209794122a4ec3b5d8d437cb5739641fb3e
|
[
"BSD-3-Clause"
] | 111
|
2015-02-15T00:27:58.000Z
|
2022-01-13T05:08:01.000Z
|
"""
Amqp exchange class tests
"""
import asynctest
from . import testcase
from .. import exceptions
class ExchangeDeclareTestCase(testcase.RabbitTestCaseMixin, asynctest.TestCase):
_multiprocess_can_split_ = True
async def test_exchange_direct_declare(self):
result = await self.channel.exchange_declare(
'exchange_name', type_name='direct')
self.assertTrue(result)
async def test_exchange_fanout_declare(self):
result = await self.channel.exchange_declare(
'exchange_name', type_name='fanout')
self.assertTrue(result)
async def test_exchange_topic_declare(self):
result = await self.channel.exchange_declare(
'exchange_name', type_name='topic')
self.assertTrue(result)
async def test_exchange_headers_declare(self):
result = await self.channel.exchange_declare(
'exchange_name', type_name='headers')
self.assertTrue(result)
async def test_exchange_declare_wrong_types(self):
result = await self.channel.exchange_declare(
'exchange_name', type_name='headers',
auto_delete=True, durable=True)
self.assertTrue(result)
with self.assertRaises(exceptions.ChannelClosed):
result = await self.channel.exchange_declare(
'exchange_name', type_name='fanout',
auto_delete=False, durable=False)
async def test_exchange_declare_passive(self):
result = await self.channel.exchange_declare(
'exchange_name', type_name='headers',
auto_delete=True, durable=True)
self.assertTrue(result)
result = await self.channel.exchange_declare(
'exchange_name', type_name='headers',
auto_delete=True, durable=True, passive=True)
self.assertTrue(result)
result = await self.channel.exchange_declare(
'exchange_name', type_name='headers',
auto_delete=False, durable=False, passive=True)
self.assertTrue(result)
async def test_exchange_declare_passive_does_not_exists(self):
with self.assertRaises(exceptions.ChannelClosed) as cm:
await self.channel.exchange_declare(
'non_existant_exchange',
type_name='headers',
auto_delete=False, durable=False, passive=True)
self.assertEqual(cm.exception.code, 404)
async def test_exchange_declare_unknown_type(self):
with self.assertRaises(exceptions.ChannelClosed):
await self.channel.exchange_declare(
'non_existant_exchange',
type_name='unknown_type',
auto_delete=False, durable=False, passive=True)
class ExchangeDelete(testcase.RabbitTestCaseMixin, asynctest.TestCase):
async def test_delete(self):
exchange_name = 'exchange_name'
await self.channel.exchange_declare(exchange_name, type_name='direct')
result = await self.channel.exchange_delete(exchange_name)
self.assertTrue(result)
with self.assertRaises(exceptions.ChannelClosed) as cm:
await self.channel.exchange_declare(
exchange_name, type_name='direct', passive=True
)
self.assertEqual(cm.exception.code, 404)
async def test_double_delete(self):
exchange_name = 'exchange_name'
await self.channel.exchange_declare(exchange_name, type_name='direct')
result = await self.channel.exchange_delete(exchange_name)
self.assertTrue(result)
if self.server_version() < (3, 3, 5):
with self.assertRaises(exceptions.ChannelClosed) as cm:
await self.channel.exchange_delete(exchange_name)
self.assertEqual(cm.exception.code, 404)
else:
# weird result from rabbitmq 3.3.5
result = await self.channel.exchange_delete(exchange_name)
self.assertTrue(result)
class ExchangeBind(testcase.RabbitTestCaseMixin, asynctest.TestCase):
async def test_exchange_bind(self):
await self.channel.exchange_declare('exchange_destination', type_name='direct')
await self.channel.exchange_declare('exchange_source', type_name='direct')
result = await self.channel.exchange_bind(
'exchange_destination', 'exchange_source', routing_key='')
self.assertTrue(result)
async def test_inexistant_exchange_bind(self):
with self.assertRaises(exceptions.ChannelClosed) as cm:
await self.channel.exchange_bind(
'exchange_destination', 'exchange_source', routing_key='')
self.assertEqual(cm.exception.code, 404)
class ExchangeUnbind(testcase.RabbitTestCaseMixin, asynctest.TestCase):
async def test_exchange_unbind(self):
ex_source = 'exchange_source'
ex_destination = 'exchange_destination'
await self.channel.exchange_declare(ex_destination, type_name='direct')
await self.channel.exchange_declare(ex_source, type_name='direct')
await self.channel.exchange_bind(
ex_destination, ex_source, routing_key='')
await self.channel.exchange_unbind(
ex_destination, ex_source, routing_key='')
async def test_exchange_unbind_reversed(self):
ex_source = 'exchange_source'
ex_destination = 'exchange_destination'
await self.channel.exchange_declare(ex_destination, type_name='direct')
await self.channel.exchange_declare(ex_source, type_name='direct')
await self.channel.exchange_bind(
ex_destination, ex_source, routing_key='')
if self.server_version() < (3, 3, 5):
with self.assertRaises(exceptions.ChannelClosed) as cm:
result = await self.channel.exchange_unbind(
ex_source, ex_destination, routing_key='')
self.assertEqual(cm.exception.code, 404)
else:
# weird result from rabbitmq 3.3.5
result = await self.channel.exchange_unbind(ex_source, ex_destination, routing_key='')
self.assertTrue(result)
| 37.588957
| 98
| 0.675698
| 674
| 6,127
| 5.896142
| 0.108309
| 0.070206
| 0.124811
| 0.187217
| 0.906895
| 0.882486
| 0.82612
| 0.77227
| 0.689733
| 0.675642
| 0
| 0.005757
| 0.234536
| 6,127
| 162
| 99
| 37.820988
| 0.841578
| 0.015016
| 0
| 0.646552
| 0
| 0
| 0.081534
| 0.006974
| 0
| 0
| 0
| 0
| 0.215517
| 1
| 0
| false
| 0.060345
| 0.025862
| 0
| 0.068966
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
24c8633e4dff63088afb3aadc34f2d5b0636b7a8
| 93
|
py
|
Python
|
decisiorama/__init__.py
|
j-chacon/Hartmann_contaminants
|
316d543efcdc0bcc4442c56fda6748b405ca2e22
|
[
"MIT"
] | null | null | null |
decisiorama/__init__.py
|
j-chacon/Hartmann_contaminants
|
316d543efcdc0bcc4442c56fda6748b405ca2e22
|
[
"MIT"
] | null | null | null |
decisiorama/__init__.py
|
j-chacon/Hartmann_contaminants
|
316d543efcdc0bcc4442c56fda6748b405ca2e22
|
[
"MIT"
] | null | null | null |
from decisiorama import pda
from decisiorama import sensitivity
from decisiorama import utils
| 31
| 35
| 0.88172
| 12
| 93
| 6.833333
| 0.5
| 0.54878
| 0.768293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11828
| 93
| 3
| 36
| 31
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
24f7c70905a68a25e9698adbcf4130537c5fbde0
| 20,996
|
py
|
Python
|
serial_scripts/vrouter/test_session_logging.py
|
vkolli/5.0_contrail-test
|
1793f169a94100400a1b2fafbad21daf5aa4d48a
|
[
"Apache-2.0"
] | null | null | null |
serial_scripts/vrouter/test_session_logging.py
|
vkolli/5.0_contrail-test
|
1793f169a94100400a1b2fafbad21daf5aa4d48a
|
[
"Apache-2.0"
] | 1
|
2021-06-01T22:18:29.000Z
|
2021-06-01T22:18:29.000Z
|
serial_scripts/vrouter/test_session_logging.py
|
vkolli/5.0_contrail-test
|
1793f169a94100400a1b2fafbad21daf5aa4d48a
|
[
"Apache-2.0"
] | null | null | null |
from tcutils.wrappers import preposttest_wrapper
from common.sessionlogging.base import *
import test
import random
from tcutils.util import skip_because
AF_TEST = 'v6'
class SessionLogging(SessionLoggingBase):
@classmethod
def setUpClass(cls):
super(SessionLogging, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(SessionLogging, cls).tearDownClass()
def _test_logging_intra_node(self):
self._create_resources(test_type='intra-node')
#For intra node traffic there is no tunnel so underlay_proto would be zero
underlay_proto = 0
proto_list = [1, 17, 6]
self.enable_logging_on_compute(self.client_fixture.vm_node_ip,
log_type=AGENT_LOG)
#Clear local ips after agent restart
self.client_fixture.clear_local_ips()
self.server_fixture.clear_local_ips()
#Verify Session logs in agent logs
for proto in proto_list:
self.start_traffic_validate_sessions(self.client_fixture,
self.server_fixture, self.policy_fixture, proto=proto,
underlay_proto=underlay_proto)
self.logger.info("Expected Session logs found in agent log for "
"protocol %s" % (proto))
self.enable_logging_on_compute(self.client_fixture.vm_node_ip,
log_type=SYS_LOG)
#Clear local ips after agent restart
self.client_fixture.clear_local_ips()
self.server_fixture.clear_local_ips()
#Verify Session logs in syslog
for proto in proto_list:
self.start_traffic_validate_sessions_in_syslog(self.client_fixture,
self.server_fixture, self.policy_fixture, proto=proto,
underlay_proto=underlay_proto)
self.logger.info("Expected Session logs found in syslog for "
"protocol %s" % (proto))
def _test_logging_inter_node(self):
self._create_resources(test_type='inter-node')
underlay_proto = UNDERLAY_PROTO[
self.connections.read_vrouter_config_encap()[0]]
proto_list = [1, 17, 6]
self.enable_logging_on_compute(self.client_fixture.vm_node_ip,
log_type=AGENT_LOG)
self.enable_logging_on_compute(self.server_fixture.vm_node_ip,
log_type=AGENT_LOG)
#Clear local ips after agent restart
self.client_fixture.clear_local_ips()
self.server_fixture.clear_local_ips()
#Verify Session logs in agent logs
for proto in proto_list:
self.start_traffic_validate_sessions(self.client_fixture,
self.server_fixture, self.policy_fixture, proto=proto,
underlay_proto=underlay_proto)
self.logger.info("Expected Session logs found in agent log for "
"protocol %s" % (proto))
self.enable_logging_on_compute(self.client_fixture.vm_node_ip,
log_type=SYS_LOG)
self.enable_logging_on_compute(self.server_fixture.vm_node_ip,
log_type=SYS_LOG)
#Clear local ips after agent restart
self.client_fixture.clear_local_ips()
self.server_fixture.clear_local_ips()
#Verify Session logs in syslog
for proto in proto_list:
self.start_traffic_validate_sessions_in_syslog(self.client_fixture,
self.server_fixture, self.policy_fixture, proto=proto,
underlay_proto=underlay_proto)
self.logger.info("Expected Session logs found in syslog for "
"protocol %s" % (proto))
@preposttest_wrapper
def test_local_logging_intra_node(self):
"""
Description: Verify sessions logged for inter-VN intra-Node traffic
Steps:
1. create 2 VNs and connect them using policy
2. launch 1 VM in each VN on same compute node
3. start icmp/tcp/udp traffic and verify the session logs in agent log
as well as in syslog
Pass criteria:
step 3 should pass
"""
self._test_logging_intra_node()
@preposttest_wrapper
def test_local_logging_inter_node(self):
"""
Description: Verify sessions logged for inter-VN inter-Node traffic
Steps:
1. create 2 VNs and connect them using policy
2. launch 1 VM in each VN on different compute nodes
3. start icmp/tcp/udp traffic and verify the session logs in agent log
as well as in syslog
Pass criteria:
step 3 should pass
"""
self._test_logging_inter_node()
@preposttest_wrapper
def test_client_session_aggregation(self):
"""
Description: Verify client sessions aggregation for tcp and udp
"""
self._create_resources(test_type='inter-node', no_of_server=3)
underlay_proto = UNDERLAY_PROTO[
self.connections.read_vrouter_config_encap()[0]]
self.enable_logging_on_compute(self.client_fixture.vm_node_ip,
log_type=AGENT_LOG)
self.enable_logging_on_compute(self.server_fixture.vm_node_ip,
log_type=AGENT_LOG)
#Clear local ips after agent restart
for vm in self.client_fixtures + self.server_fixtures:
vm.clear_local_ips()
pkt_count = 100
client_port = random.randint(12000, 65000)
service_port = client_port + 1
hping3_obj = {}
traffic_stats = {}
project_fqname = ':'.join(self.project.project_fq_name)
client_vmi_fqname = project_fqname + ':' +\
self.client_fixture.vmi_ids[self.client_fixture.vn_fq_name]
server_vn_fq_name = self.server_fixture.vn_fq_name
is_client_session = 1
policy_api_obj = self.vnc_lib.network_policy_read(
id=self.policy_fixture.get_id())
nw_ace_uuid = policy_api_obj.get_network_policy_entries(
).policy_rule[0].rule_uuid
interval = 1
tcp_flags = 0
for proto in [17, 6]:
traffic_stats[proto] = {}
hping3_obj[proto] = {}
udp = True if proto == 17 else False
#Start the traffic
for idx, server in enumerate(self.server_fixtures):
hping3_obj[proto][server] = self.send_hping3_traffic(
self.client_fixture, server.vm_ip, client_port+idx,
service_port, count=pkt_count, interval=interval,
wait=False, stop=False, udp=udp, keep=True)[0]
if proto != 6:
expected_client_session = SESSION_CLIENT_AGGR % (
client_vmi_fqname,#Client vmi name
self.client_fixture.vn_fq_name, FIREWALL_RULE_ID_DEFAULT,
server_vn_fq_name, is_client_session, 0,
self.client_fixture.vm_node_ip,
self.client_fixture.vm_ip, service_port, proto,#Session agg
INT_RE, INT_RE, INT_RE, INT_RE,
self.server_fixtures[0].vm_ip, client_port,#Server1
INT_RE, 1, UUID_RE, tcp_flags, INT_RE,#Fwd flow info
'pass', UUID_RE, nw_ace_uuid, INT_RE,
INT_RE, 1, UUID_RE, tcp_flags, INT_RE,#Reverse flow info
'pass', UUID_RE, nw_ace_uuid, INT_RE,
self.client_fixture.vm_id,#Client vm ID
self.server_fixtures[0].vm_node_ip, underlay_proto,
self.server_fixtures[1].vm_ip, client_port+1,#Server2
INT_RE, 1, UUID_RE, tcp_flags, INT_RE,#Fwd flow info
'pass', UUID_RE, nw_ace_uuid, INT_RE,
INT_RE, 1, UUID_RE, tcp_flags, INT_RE,#Reverse flow info
'pass', UUID_RE, nw_ace_uuid, INT_RE,
self.client_fixture.vm_id,
self.server_fixtures[1].vm_node_ip, underlay_proto,
self.server_fixtures[2].vm_ip, client_port+2,#Server3
INT_RE, 1, UUID_RE, tcp_flags, INT_RE,#Fwd flow info
'pass', UUID_RE, nw_ace_uuid, INT_RE,
INT_RE, 1, UUID_RE, tcp_flags, INT_RE,#Reverse flow info
'pass', UUID_RE, nw_ace_uuid, INT_RE,
self.client_fixture.vm_id,
self.server_fixtures[2].vm_node_ip, underlay_proto)
#Verify session aggregation on client node
result, output = self.search_session_in_agent_log(
self.client_fixture.vm_node_ip,
expected_client_session)
assert result, ("Expected client session not found in agent log "
"for protocol %s" % (proto))
#Stop the traffic
for idx, server in enumerate(self.server_fixtures):
traffic_stats[proto][server] = hping3_obj[proto][server].stop()[0]
#Delete all the flows
self.delete_all_flows_on_vms_compute(
self.client_fixtures + self.server_fixtures)
if proto == 6:
pkt_count1 = pkt_count2 = pkt_count3 = 1
else:
pkt_count1 = traffic_stats[proto][self.server_fixtures[0]]['sent']
pkt_count2 = traffic_stats[proto][self.server_fixtures[1]]['sent']
pkt_count3 = traffic_stats[proto][self.server_fixtures[2]]['sent']
expected_client_session = SESSION_CLIENT_AGGR_TEARDOWN % (
client_vmi_fqname,
self.client_fixture.vn_fq_name, FIREWALL_RULE_ID_DEFAULT,
server_vn_fq_name, is_client_session, 0,
self.client_fixture.vm_node_ip,
self.client_fixture.vm_ip, service_port, proto,
INT_RE, INT_RE, INT_RE, INT_RE,
self.server_fixtures[0].vm_ip, client_port,
UUID_RE, INT_RE, INT_RE, INT_RE, pkt_count1,
'pass', UUID_RE, nw_ace_uuid,
UUID_RE, INT_RE, INT_RE, INT_RE, pkt_count1,
'pass', UUID_RE, nw_ace_uuid,
self.client_fixture.vm_id,
self.server_fixtures[0].vm_node_ip, underlay_proto,
self.server_fixtures[1].vm_ip, client_port+1,
UUID_RE, INT_RE, INT_RE, INT_RE, pkt_count2,
'pass', UUID_RE, nw_ace_uuid,
UUID_RE, INT_RE, INT_RE, INT_RE, pkt_count2,
'pass', UUID_RE, nw_ace_uuid,
self.client_fixture.vm_id,
self.server_fixtures[1].vm_node_ip, underlay_proto,
self.server_fixtures[2].vm_ip, client_port+2,
UUID_RE, INT_RE, INT_RE, INT_RE, pkt_count3,
'pass', UUID_RE, nw_ace_uuid,
UUID_RE, INT_RE, INT_RE, INT_RE, pkt_count3,
'pass', UUID_RE, nw_ace_uuid,
self.client_fixture.vm_id,
self.server_fixtures[2].vm_node_ip, underlay_proto)
#Verify teardown session after deleting the flows
result, output = self.search_session_in_agent_log(
self.client_fixture.vm_node_ip,
expected_client_session)
if ((not result) and (proto == 6)):
expected_client_session = SESSION_CLIENT_AGGR_TEARDOWN_TCP % (
client_vmi_fqname,
self.client_fixture.vn_fq_name, FIREWALL_RULE_ID_DEFAULT,
server_vn_fq_name, is_client_session, 0,
self.client_fixture.vm_node_ip,
self.client_fixture.vm_ip, service_port, proto,
INT_RE, INT_RE, INT_RE, INT_RE,
self.server_fixtures[0].vm_ip, client_port,#Server1
INT_RE, pkt_count1, UUID_RE, tcp_flags, INT_RE,#Fwd flow info
INT_RE, INT_RE, pkt_count1,
'pass', UUID_RE, nw_ace_uuid, INT_RE,
INT_RE, pkt_count1, UUID_RE, tcp_flags, INT_RE,#Reverse flow info
INT_RE, INT_RE, pkt_count1,
'pass', UUID_RE, nw_ace_uuid, INT_RE,
self.client_fixture.vm_id,
self.server_fixtures[0].vm_node_ip, underlay_proto,
self.server_fixtures[1].vm_ip, client_port+1,#Server2
INT_RE, pkt_count1, UUID_RE, tcp_flags, INT_RE,#Fwd flow info
INT_RE, INT_RE, pkt_count1,
'pass', UUID_RE, nw_ace_uuid, INT_RE,
INT_RE, pkt_count1, UUID_RE, tcp_flags, INT_RE,#Reverse flow info
INT_RE, INT_RE, pkt_count1,
'pass', UUID_RE, nw_ace_uuid, INT_RE,
self.client_fixture.vm_id,
self.server_fixtures[1].vm_node_ip, underlay_proto,
self.server_fixtures[2].vm_ip, client_port+2,#Server3
INT_RE, pkt_count1, UUID_RE, tcp_flags, INT_RE,#Fwd flow info
INT_RE, INT_RE, pkt_count1,
'pass', UUID_RE, nw_ace_uuid, INT_RE,
INT_RE, pkt_count1, UUID_RE, tcp_flags, INT_RE,#Reverse flow info
INT_RE, INT_RE, pkt_count1,
'pass', UUID_RE, nw_ace_uuid, INT_RE,
self.client_fixture.vm_id,
self.server_fixtures[2].vm_node_ip, underlay_proto)
result_tcp, output = self.search_session_in_agent_log(
self.client_fixture.vm_node_ip,
expected_client_session)
result = result or result_tcp
assert result, ("Expected client session not found in agent log "
"for protocol %s" % (proto))
self.logger.info("Expected Session logs found in agent log for "
"protocol %s" % (proto))
@preposttest_wrapper
def test_server_session_aggregation(self):
"""
Description: Verify server sessions aggregation
"""
self._create_resources(test_type='inter-node', no_of_client=3)
underlay_proto = UNDERLAY_PROTO[
self.connections.read_vrouter_config_encap()[0]]
self.enable_logging_on_compute(self.client_fixture.vm_node_ip,
log_type=AGENT_LOG)
self.enable_logging_on_compute(self.server_fixture.vm_node_ip,
log_type=AGENT_LOG)
#Clear local ips after agent restart
for vm in self.client_fixtures + self.server_fixtures:
vm.clear_local_ips()
pkt_count = 100
client_port = random.randint(12000, 65000)
service_port = client_port + 1
hping3_obj = {}
traffic_stats = {}
project_fqname = ':'.join(self.project.project_fq_name)
server_vmi_fqname = project_fqname + ':' +\
self.server_fixture.vmi_ids[self.server_fixture.vn_fq_name]
client_vn_fq_name = self.client_fixture.vn_fq_name
is_client_session = 0
policy_api_obj = self.vnc_lib.network_policy_read(
id=self.policy_fixture.get_id())
nw_ace_uuid = policy_api_obj.get_network_policy_entries(
).policy_rule[0].rule_uuid
interval = 1
tcp_flags = 0
proto = 17
traffic_stats[proto] = {}
hping3_obj[proto] = {}
udp = True if proto == 17 else False
#Start the traffic
for idx, client in enumerate(self.client_fixtures):
hping3_obj[proto][client] = self.send_hping3_traffic(
client, self.server_fixture.vm_ip, client_port+idx,
service_port, count=pkt_count, interval=interval,
wait=False, stop=False, udp=udp, keep=True)[0]
expected_server_session = SESSION_SERVER_AGGR % (
server_vmi_fqname,#Server vmi name
self.server_fixture.vn_fq_name, FIREWALL_RULE_ID_DEFAULT,
client_vn_fq_name, is_client_session, 0,
self.server_fixture.vm_node_ip,
self.server_fixture.vm_ip, service_port, proto,#Session agg
INT_RE, INT_RE, INT_RE, INT_RE,
self.client_fixtures[0].vm_ip, client_port,#Client1
INT_RE, 1, UUID_RE, tcp_flags, INT_RE,#Fwd flow info
'pass', UUID_RE, nw_ace_uuid, INT_RE,
INT_RE, 1, UUID_RE, tcp_flags, INT_RE,#Reverse flow info
'pass', UUID_RE, nw_ace_uuid, INT_RE,
self.server_fixture.vm_id,#Server vm ID
self.client_fixtures[0].vm_node_ip, underlay_proto,
self.client_fixtures[1].vm_ip, client_port+1,#Client2
INT_RE, 1, UUID_RE, tcp_flags, INT_RE,#Fwd flow info
'pass', UUID_RE, nw_ace_uuid, INT_RE,
INT_RE, 1, UUID_RE, tcp_flags, INT_RE,#Reverse flow info
'pass', UUID_RE, nw_ace_uuid, INT_RE,
self.server_fixture.vm_id,
self.client_fixtures[1].vm_node_ip, underlay_proto,
self.client_fixtures[2].vm_ip, client_port+2,#Client3
INT_RE, 1, UUID_RE, tcp_flags, INT_RE,#Fwd flow info
'pass', UUID_RE, nw_ace_uuid, INT_RE,
INT_RE, 1, UUID_RE, tcp_flags, INT_RE,#Reverse flow info
'pass', UUID_RE, nw_ace_uuid, INT_RE,
self.server_fixture.vm_id,
self.client_fixtures[2].vm_node_ip, underlay_proto)
#Verify session aggregation on client node
result, output = self.search_session_in_agent_log(
self.server_fixture.vm_node_ip,
expected_server_session)
assert result, ("Expected server session not found in agent log "
"for protocol %s" % (proto))
#Stop the traffic
for idx, client in enumerate(self.client_fixtures):
traffic_stats[proto][client] = hping3_obj[proto][client].stop()[0]
#Delete all the flows
self.delete_all_flows_on_vms_compute(
self.client_fixtures + self.server_fixtures)
pkt_count1 = traffic_stats[proto][self.client_fixtures[0]]['sent']
pkt_count2 = traffic_stats[proto][self.client_fixtures[1]]['sent']
pkt_count3 = traffic_stats[proto][self.client_fixtures[2]]['sent']
expected_server_session = SESSION_CLIENT_AGGR_TEARDOWN % (
server_vmi_fqname,
self.server_fixture.vn_fq_name, FIREWALL_RULE_ID_DEFAULT,
client_vn_fq_name, is_client_session, 0,
self.server_fixture.vm_node_ip,
self.server_fixture.vm_ip, service_port, proto,
INT_RE, INT_RE, INT_RE, INT_RE,
self.client_fixtures[0].vm_ip, client_port,
UUID_RE, INT_RE, INT_RE, INT_RE, pkt_count1,
'pass', UUID_RE, nw_ace_uuid,
UUID_RE, INT_RE, INT_RE, INT_RE, pkt_count1,
'pass', UUID_RE, nw_ace_uuid,
self.server_fixture.vm_id,
self.client_fixtures[0].vm_node_ip, underlay_proto,
self.client_fixtures[1].vm_ip, client_port+1,
UUID_RE, INT_RE, INT_RE, INT_RE, pkt_count2,
'pass', UUID_RE, nw_ace_uuid,
UUID_RE, INT_RE, INT_RE, INT_RE, pkt_count2,
'pass', UUID_RE, nw_ace_uuid,
self.server_fixture.vm_id,
self.client_fixtures[1].vm_node_ip, underlay_proto,
self.client_fixtures[2].vm_ip, client_port+2,
UUID_RE, INT_RE, INT_RE, INT_RE, pkt_count3,
'pass', UUID_RE, nw_ace_uuid,
UUID_RE, INT_RE, INT_RE, INT_RE, pkt_count3,
'pass', UUID_RE, nw_ace_uuid,
self.server_fixture.vm_id,
self.client_fixtures[2].vm_node_ip, underlay_proto)
#Verify teardown session after deleting the flows
result, output = self.search_session_in_agent_log(
self.server_fixture.vm_node_ip,
expected_server_session)
assert result, ("Expected server session not found in agent log "
"for protocol %s" % (proto))
self.logger.info("Expected Session logs found in agent log for "
"protocol %s" % (proto))
class SessionLoggingIpv6(SessionLogging):
@classmethod
def setUpClass(cls):
super(SessionLoggingIpv6, cls).setUpClass()
cls.inputs.set_af(AF_TEST)
def is_test_applicable(self):
if (self.inputs.orchestrator == 'vcenter') and (
not self.orch.is_feature_supported('ipv6')):
return(False, 'Skipping IPv6 Test on vcenter setup')
return (True, None)
@preposttest_wrapper
@skip_because(address_family = 'v6')
def test_client_session_aggregation(self):
'''
This test uses hping3 utils which does not support ipv6, so need to skip
'''
super(SessionLoggingIpv6, self).test_client_session_aggregation()
@preposttest_wrapper
@skip_because(address_family = 'v6')
def test_server_session_aggregation(self):
'''
This test uses hping3 utils which does not support ipv6, so need to skip
'''
super(SessionLoggingIpv6, self).test_server_session_aggregation()
| 46.246696
| 85
| 0.618165
| 2,743
| 20,996
| 4.374408
| 0.076559
| 0.050838
| 0.038503
| 0.045004
| 0.887991
| 0.869072
| 0.845237
| 0.826069
| 0.814568
| 0.776898
| 0
| 0.014112
| 0.301391
| 20,996
| 453
| 86
| 46.348786
| 0.803927
| 0.095256
| 0
| 0.816901
| 0
| 0
| 0.043762
| 0
| 0
| 0
| 0
| 0
| 0.011268
| 1
| 0.033803
| false
| 0.084507
| 0.014085
| 0
| 0.056338
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
70049ed00e4c0b7633c512741073cd70fa0db15c
| 166
|
py
|
Python
|
yawn/models.py
|
rsalmaso/yawn
|
5ede07f9015468481c941f8221cd70c99be6e895
|
[
"MIT"
] | 28
|
2017-01-20T15:07:20.000Z
|
2022-02-18T17:25:38.000Z
|
yawn/models.py
|
rsalmaso/yawn
|
5ede07f9015468481c941f8221cd70c99be6e895
|
[
"MIT"
] | 83
|
2017-01-26T16:28:21.000Z
|
2022-03-08T23:48:04.000Z
|
yawn/models.py
|
rsalmaso/yawn
|
5ede07f9015468481c941f8221cd70c99be6e895
|
[
"MIT"
] | 5
|
2017-01-23T00:19:38.000Z
|
2020-05-12T15:36:59.000Z
|
# a hack to store the models in subfolders
from yawn.task.models import * # noqa
from yawn.worker.models import * # noqa
from yawn.workflow.models import * # noqa
| 33.2
| 42
| 0.740964
| 26
| 166
| 4.730769
| 0.576923
| 0.195122
| 0.390244
| 0.325203
| 0.390244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180723
| 166
| 4
| 43
| 41.5
| 0.904412
| 0.331325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
700e664ee0e556361e34af746991946bb25de7b5
| 726
|
py
|
Python
|
test.py
|
bastcazaux/search_myers_IUPAC
|
8fb3dba7c2313d103bf9eeb6a739e5b27e4ded58
|
[
"CECILL-B"
] | 3
|
2020-10-15T09:35:30.000Z
|
2020-12-28T03:02:32.000Z
|
test.py
|
bastcazaux/search_myers_IUPAC
|
8fb3dba7c2313d103bf9eeb6a739e5b27e4ded58
|
[
"CECILL-B"
] | null | null | null |
test.py
|
bastcazaux/search_myers_IUPAC
|
8fb3dba7c2313d103bf9eeb6a739e5b27e4ded58
|
[
"CECILL-B"
] | 2
|
2021-02-02T10:45:48.000Z
|
2021-08-04T08:41:38.000Z
|
# Smoke-test every public entry point of the search_myers_IUPAC extension:
# echo the call expression, then evaluate it and print the result.
import search_myers_IUPAC

_demo_calls = (
    ("search_myers_IUPAC.listofpositions('HACTADGTRTG','HYC', 1)",
     lambda: search_myers_IUPAC.listofpositions('HACTADGTRTG', 'HYC', 1)),
    ("search_myers_IUPAC.listofbestpositions('HACTADGTRTG','HYCAC')",
     lambda: search_myers_IUPAC.listofbestpositions('HACTADGTRTG', 'HYCAC')),
    ("search_myers_IUPAC.backtrackpositions('HACTADGTRTG','HYCG', 1,10)",
     lambda: search_myers_IUPAC.backtrackpositions('HACTADGTRTG', 'HYCG', 1, 10)),
    ("search_myers_IUPAC.backtrackbestposition('HACTADGTRTG','HYCG', 1,10)",
     lambda: search_myers_IUPAC.backtrackbestposition('HACTADGTRTG', 'HYCG', 1, 10)),
    ("search_myers_IUPAC.tag('HACTADGTRTG','HYCG', 1,8)",
     lambda: search_myers_IUPAC.tag('HACTADGTRTG', 'HYCG', 1, 8)),
)

for expression, call in _demo_calls:
    print(expression)
    print(call())
| 38.210526
| 77
| 0.790634
| 88
| 726
| 6.272727
| 0.193182
| 0.219203
| 0.318841
| 0.380435
| 0.960145
| 0.960145
| 0.960145
| 0.960145
| 0.960145
| 0.849638
| 0
| 0.025751
| 0.03719
| 726
| 18
| 78
| 40.333333
| 0.763949
| 0
| 0
| 0
| 0
| 0
| 0.518621
| 0.387586
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.090909
| 0
| 0.090909
| 0.909091
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 11
|
703459fc2adb300c9565840ab76077f238507d5f
| 4,063
|
py
|
Python
|
falcon_call.py
|
ashish-1801/falcon2.0
|
2560d0de4bd2dd138c0af985af5ea98beb3021b9
|
[
"MIT"
] | null | null | null |
falcon_call.py
|
ashish-1801/falcon2.0
|
2560d0de4bd2dd138c0af985af5ea98beb3021b9
|
[
"MIT"
] | null | null | null |
falcon_call.py
|
ashish-1801/falcon2.0
|
2560d0de4bd2dd138c0af985af5ea98beb3021b9
|
[
"MIT"
] | null | null | null |
import csv
import requests
# Shared HTTP headers for every FALCON / BioFALCON request: JSON body, UTF-8 charset.
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
def falcon2_call(text, mode='short'):
    """Link entities in *text* via the FALCON 2.0 API.

    Parameters:
        text: input text; single and double quotes are stripped first
            because the JSON payload below is built by string concatenation.
        mode: 'short' (default) queries the API's short mode; any other
            value queries its long mode.

    Returns:
        'short' mode: a (entities_wikidata, entities_db) pair.  When the API
        found entities, each element is the first linked URI (a string; the
        Wikidata one has '<'/'>' stripped); otherwise it stays an empty list.
        Other modes: the raw decoded JSON response on HTTP 200, otherwise
        ([], []).

    Network and JSON-decoding errors propagate to the caller (the original
    bare ``except: raise`` only re-raised, and its trailing ``return -1``
    was unreachable, so both were removed).
    """
    # Quotes would break the hand-built JSON payload below.
    text = text.replace('"', '').replace("'", "")
    payload = '{"text":"' + text + '"}'
    api_mode = 'short' if mode == 'short' else 'long'
    url = 'https://labs.tib.eu/falcon/falcon2/api?mode=' + api_mode + '&db=1'

    entities_wikidata = []
    entities_db = []

    # POST once; on a non-200 status retry exactly once (same as the
    # duplicated request blocks in the original).
    r = requests.post(url, data=payload.encode('utf-8'), headers=headers)
    if r.status_code != 200:
        r = requests.post(url, data=payload.encode('utf-8'), headers=headers)

    if r.status_code == 200:
        response = r.json()
        if mode != 'short':
            # Long mode returns the raw API response unprocessed; the
            # original returned here too, leaving its entity-extraction
            # loops unreachable (removed as dead code).
            return response
        for result in response['entities_wikidata']:
            entities_wikidata.append(result[0])
        for result in response['entities_dbpedia']:
            entities_db.append(result[0])

    # Keep only the first URI of each kind, as the original did.
    if len(entities_wikidata) > 0:
        entities_wikidata = entities_wikidata[0].replace('<', '').replace('>', '')
    if len(entities_db) > 0:
        entities_db = entities_db[0]
    return entities_wikidata, entities_db
def bioFalcon_call(text, mode='short'):
    """Link entities in *text* via the BioFALCON API.

    Parameters:
        text: input text; quotes are stripped because the JSON payload is
            built by string concatenation.
        mode: forwarded verbatim as the API's ``mode`` query parameter.

    Returns:
        'short' mode: ``response['entities'][1][0]`` when the API returned
        more than one entity, otherwise "" (also "" on a non-200 status).
        Other modes: the raw decoded JSON on HTTP 200; on a non-200 status
        the function implicitly returns None (quirk preserved from the
        original).

    Network and JSON-decoding errors propagate to the caller (the original
    bare ``except: raise`` only re-raised; its trailing ``return ""`` was
    unreachable and has been removed).
    """
    text = text.replace('"', '').replace("'", "")
    # The url/payload construction was duplicated in both branches of the
    # original; it is identical, so it is built once here.
    url = 'https://labs.tib.eu/sdm/biofalcon/api?mode=' + mode
    payload = '{"text":"' + text + '"}'
    r = requests.post(url, data=payload.encode('utf-8'), headers=headers)
    if mode == 'short':
        if r.status_code != 200:
            return ""
        response = r.json()
        # NOTE(review): index [1][0] skips the first entity — presumably the
        # URI of the second match; confirm against the BioFALCON schema.
        if len(response['entities']) > 1:
            return response['entities'][1][0]
        return ""
    if r.status_code == 200:
        return r.json()
    # Non-200 in non-short mode: fall through and return None, exactly as
    # the original did.
| 39.446602
| 94
| 0.488555
| 402
| 4,063
| 4.828358
| 0.139303
| 0.148377
| 0.111283
| 0.07831
| 0.846986
| 0.814013
| 0.809892
| 0.770737
| 0.770737
| 0.74137
| 0
| 0.019124
| 0.38223
| 4,063
| 103
| 95
| 39.446602
| 0.754183
| 0.007384
| 0
| 0.847826
| 0
| 0.021739
| 0.124682
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021739
| false
| 0
| 0.021739
| 0
| 0.163043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7081c16cd575a3edbe9448426cdfe43761c040d7
| 22,250
|
py
|
Python
|
alshamelah_api/apps/books/permissions.py
|
devna-dev/durar-backend
|
36ea29bafd4cb95098e4057eb71df211dc923008
|
[
"MIT"
] | null | null | null |
alshamelah_api/apps/books/permissions.py
|
devna-dev/durar-backend
|
36ea29bafd4cb95098e4057eb71df211dc923008
|
[
"MIT"
] | null | null | null |
alshamelah_api/apps/books/permissions.py
|
devna-dev/durar-backend
|
36ea29bafd4cb95098e4057eb71df211dc923008
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from rest_framework import permissions
from rolepermissions.checkers import has_permission
class CanManageBook(permissions.IsAuthenticated):
    """
    Permission class for book endpoints.

    GET is always allowed and superusers bypass every check.  Other safe
    methods require the ``view_books`` role permission; POST, PUT/PATCH and
    DELETE require ``create_books``, ``edit_books`` and ``delete_books``
    respectively.  Any other request is denied.
    """
    # book_lookup = 'parent_lookup_book' case of parent child
    def has_permission(self, request, view):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        # Allow list to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        if request.method in permissions.SAFE_METHODS:
            return has_permission(request.user, AppPermissions.view_books)
        # 'POST' method creation
        if request.method == 'POST':
            return has_permission(request.user, AppPermissions.create_books)
        # 'PUT/PATCH' method update
        if request.method in ['PUT', 'PATCH']:
            return has_permission(request.user, AppPermissions.edit_books)
        # Deleting Books
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_books):
            return True
        # NOTE(review): dead code — both branches below return False, so the
        # super() result is computed but never used.  Kept verbatim.
        parent_permission = super(CanManageBook, self).has_permission(request, view)
        if not parent_permission:
            return False
        return False
    def has_object_permission(self, request, view, obj):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        """
        Manages only permissions for editing and deleting the objects
        """
        # (the string above follows the import, so it is a no-op expression
        # statement, not a real docstring)
        # Allow get to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        # 'PUT'/'PATCH': allow when the user may edit books
        if request.method in ['PUT', 'PATCH'] and has_permission(request.user, AppPermissions.edit_books):
            return True
        # Any remaining safe method is allowed on a single object
        if request.method in permissions.SAFE_METHODS:
            return True
        # DELETE: allow when the user may delete books
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_books):
            return True
        # Fall back to IsAuthenticated: True for authenticated users, else False
        parent_permission = super(CanManageBook, self).has_permission(request, view)
        if not parent_permission:
            return False
        return True
class CanSubmitBook(permissions.IsAuthenticated):
    """
    Permission class for book-submission endpoints.

    GET is always allowed and superusers bypass every check.  Other safe
    methods and PUT require the ``submit_books`` role permission (note:
    PATCH is not covered here).  At object level, any other method falls
    back to the IsAuthenticated result.
    """
    # book_lookup = 'parent_lookup_book' case of parent child
    def has_permission(self, request, view):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        # Allow list to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        if request.method in permissions.SAFE_METHODS:
            return has_permission(request.user, AppPermissions.submit_books)
        # 'PUT' method update (PATCH intentionally or accidentally omitted)
        if request.method in ['PUT']:
            return has_permission(request.user, AppPermissions.submit_books)
        # NOTE(review): dead code — both branches below return False, so the
        # super() result is computed but never used.  Kept verbatim.
        parent_permission = super(CanSubmitBook, self).has_permission(request, view)
        if not parent_permission:
            return False
        return False
    def has_object_permission(self, request, view, obj):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        """
        Manages only permissions for editing and deleting the objects
        """
        # (the string above follows the import, so it is a no-op expression
        # statement, not a real docstring)
        # Allow get to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        # 'PUT'/'PATCH': allow when the user may submit books
        if request.method in ['PUT', 'PATCH'] and has_permission(request.user, AppPermissions.submit_books):
            return True
        # Any remaining safe method is allowed on a single object
        if request.method in permissions.SAFE_METHODS:
            return True
        # Fall back to IsAuthenticated: True for authenticated users, else False
        parent_permission = super(CanSubmitBook, self).has_permission(request, view)
        if not parent_permission:
            return False
        return True
class CanManageBookRating(permissions.IsAuthenticated):
    """
    Permission class for book-rating endpoints.

    GET is always allowed and superusers bypass every check.  Other safe
    methods require ``view_book_ratings``; POST, PUT/PATCH and DELETE
    require ``create_book_rating``, ``edit_book_rating`` and
    ``delete_book_rating`` respectively.  Any other request is denied.
    """
    # book_lookup = 'parent_lookup_book' case of parent child
    def has_permission(self, request, view):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        # Allow list to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        if request.method in permissions.SAFE_METHODS:
            return has_permission(request.user, AppPermissions.view_book_ratings)
        # 'POST' method creation
        if request.method == 'POST':
            return has_permission(request.user, AppPermissions.create_book_rating)
        # 'PUT/PATCH' method update
        if request.method in ['PUT', 'PATCH']:
            return has_permission(request.user, AppPermissions.edit_book_rating)
        # Deleting ratings
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_book_rating):
            return True
        # NOTE(review): dead code — both branches below return False, so the
        # super() result is computed but never used.  Kept verbatim.
        parent_permission = super(CanManageBookRating, self).has_permission(request, view)
        if not parent_permission:
            return False
        return False
    def has_object_permission(self, request, view, obj):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        """
        Manages only permissions for editing and deleting the objects
        """
        # (the string above follows the import, so it is a no-op expression
        # statement, not a real docstring)
        # Allow get to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        # 'PUT'/'PATCH': allow when the user may edit ratings
        if request.method in ['PUT', 'PATCH'] and has_permission(request.user, AppPermissions.edit_book_rating):
            return True
        # Any remaining safe method is allowed on a single object
        if request.method in permissions.SAFE_METHODS:
            return True
        # DELETE: allow when the user may delete ratings
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_book_rating):
            return True
        # Fall back to IsAuthenticated: True for authenticated users, else False
        parent_permission = super(CanManageBookRating, self).has_permission(request, view)
        if not parent_permission:
            return False
        return True
class CanManageBookComment(permissions.IsAuthenticated):
    """
    Permission class for book-comment endpoints.

    GET is always allowed and superusers bypass every check.  Other safe
    methods require ``view_book_comments``; POST, PUT/PATCH and DELETE
    require ``create_book_comment``, ``edit_book_comment`` and
    ``delete_book_comment`` respectively.  Any other request is denied.
    """
    # book_lookup = 'parent_lookup_book' case of parent child
    def has_permission(self, request, view):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        # Allow list to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        if request.method in permissions.SAFE_METHODS:
            return has_permission(request.user, AppPermissions.view_book_comments)
        # 'POST' method creation
        if request.method == 'POST':
            return has_permission(request.user, AppPermissions.create_book_comment)
        # 'PUT/PATCH' method update
        if request.method in ['PUT', 'PATCH']:
            return has_permission(request.user, AppPermissions.edit_book_comment)
        # Deleting comments
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_book_comment):
            return True
        # NOTE(review): dead code — both branches below return False, so the
        # super() result is computed but never used.  Kept verbatim.
        parent_permission = super(CanManageBookComment, self).has_permission(request, view)
        if not parent_permission:
            return False
        return False
    def has_object_permission(self, request, view, obj):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        """
        Manages only permissions for editing and deleting the objects
        """
        # (the string above follows the import, so it is a no-op expression
        # statement, not a real docstring)
        # Allow get to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        # 'PUT'/'PATCH': allow when the user may edit comments
        if request.method in ['PUT', 'PATCH'] and has_permission(request.user, AppPermissions.edit_book_comment):
            return True
        # Any remaining safe method is allowed on a single object
        if request.method in permissions.SAFE_METHODS:
            return True
        # DELETE: allow when the user may delete comments
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_book_comment):
            return True
        # Fall back to IsAuthenticated: True for authenticated users, else False
        parent_permission = super(CanManageBookComment, self).has_permission(request, view)
        if not parent_permission:
            return False
        return True
class CanManageBookHighlight(permissions.IsAuthenticated):
    """
    Permission class for book-highlight endpoints.

    GET is always allowed and superusers bypass every check.  Other safe
    methods require ``view_book_highlights``; POST, PUT/PATCH and DELETE
    require ``create_book_highlight``, ``edit_book_highlight`` and
    ``delete_book_highlight`` respectively.  Any other request is denied.
    """
    # book_lookup = 'parent_lookup_book' case of parent child
    def has_permission(self, request, view):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        # Allow list to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        if request.method in permissions.SAFE_METHODS:
            return has_permission(request.user, AppPermissions.view_book_highlights)
        # 'POST' method creation
        if request.method == 'POST':
            return has_permission(request.user, AppPermissions.create_book_highlight)
        # 'PUT/PATCH' method update
        if request.method in ['PUT', 'PATCH']:
            return has_permission(request.user, AppPermissions.edit_book_highlight)
        # Deleting highlights
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_book_highlight):
            return True
        # NOTE(review): dead code — both branches below return False, so the
        # super() result is computed but never used.  Kept verbatim.
        parent_permission = super(CanManageBookHighlight, self).has_permission(request, view)
        if not parent_permission:
            return False
        return False
    def has_object_permission(self, request, view, obj):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        """
        Manages only permissions for editing and deleting the objects
        """
        # (the string above follows the import, so it is a no-op expression
        # statement, not a real docstring)
        # Allow get to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        # 'PUT'/'PATCH': allow when the user may edit highlights
        if request.method in ['PUT', 'PATCH'] and has_permission(request.user, AppPermissions.edit_book_highlight):
            return True
        # Any remaining safe method is allowed on a single object
        if request.method in permissions.SAFE_METHODS:
            return True
        # DELETE: allow when the user may delete highlights
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_book_highlight):
            return True
        # Fall back to IsAuthenticated: True for authenticated users, else False
        parent_permission = super(CanManageBookHighlight, self).has_permission(request, view)
        if not parent_permission:
            return False
        return True
class CanManageBookMark(permissions.IsAuthenticated):
    """
    Permission class for book-mark endpoints.

    GET is always allowed and superusers bypass every check.  Other safe
    methods require ``view_book_marks``; POST, PUT/PATCH and DELETE require
    ``create_book_mark``, ``edit_book_mark`` and ``delete_book_mark``
    respectively.  Any other request is denied.
    """
    # book_lookup = 'parent_lookup_book' case of parent child
    def has_permission(self, request, view):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        # Allow list to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        if request.method in permissions.SAFE_METHODS:
            return has_permission(request.user, AppPermissions.view_book_marks)
        # 'POST' method creation
        if request.method == 'POST':
            return has_permission(request.user, AppPermissions.create_book_mark)
        # 'PUT/PATCH' method update
        if request.method in ['PUT', 'PATCH']:
            return has_permission(request.user, AppPermissions.edit_book_mark)
        # Deleting book marks
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_book_mark):
            return True
        # NOTE(review): dead code — both branches below return False, so the
        # super() result is computed but never used.  Kept verbatim.
        parent_permission = super(CanManageBookMark, self).has_permission(request, view)
        if not parent_permission:
            return False
        return False
    def has_object_permission(self, request, view, obj):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        """
        Manages only permissions for editing and deleting the objects
        """
        # (the string above follows the import, so it is a no-op expression
        # statement, not a real docstring)
        # Allow get to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        # 'PUT'/'PATCH': allow when the user may edit book marks
        if request.method in ['PUT', 'PATCH'] and has_permission(request.user, AppPermissions.edit_book_mark):
            return True
        # Any remaining safe method is allowed on a single object
        if request.method in permissions.SAFE_METHODS:
            return True
        # DELETE: allow when the user may delete book marks
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_book_mark):
            return True
        # Fall back to IsAuthenticated: True for authenticated users, else False
        parent_permission = super(CanManageBookMark, self).has_permission(request, view)
        if not parent_permission:
            return False
        return True
class CanManageBookAudio(permissions.IsAuthenticated):
    """
    Permission class for book-audio endpoints.

    GET is always allowed and superusers bypass every check.  Other safe
    methods require ``view_book_audio``; POST, PUT/PATCH and DELETE require
    ``create_book_audio``, ``edit_book_audio`` and ``delete_book_audio``
    respectively.  Any other request is denied.
    """
    # book_lookup = 'parent_lookup_book' case of parent child
    def has_permission(self, request, view):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        # Allow list to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        if request.method in permissions.SAFE_METHODS:
            return has_permission(request.user, AppPermissions.view_book_audio)
        # 'POST' method creation
        if request.method == 'POST':
            return has_permission(request.user, AppPermissions.create_book_audio)
        # 'PUT/PATCH' method update
        if request.method in ['PUT', 'PATCH']:
            return has_permission(request.user, AppPermissions.edit_book_audio)
        # Deleting audio
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_book_audio):
            return True
        # NOTE(review): dead code — both branches below return False, so the
        # super() result is computed but never used.  Kept verbatim.
        parent_permission = super(CanManageBookAudio, self).has_permission(request, view)
        if not parent_permission:
            return False
        return False
    def has_object_permission(self, request, view, obj):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        """
        Manages only permissions for editing and deleting the objects
        """
        # (the string above follows the import, so it is a no-op expression
        # statement, not a real docstring)
        # Allow get to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        # 'PUT'/'PATCH': allow when the user may edit audio
        if request.method in ['PUT', 'PATCH'] and has_permission(request.user, AppPermissions.edit_book_audio):
            return True
        # Any remaining safe method is allowed on a single object
        if request.method in permissions.SAFE_METHODS:
            return True
        # DELETE: allow when the user may delete audio
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_book_audio):
            return True
        # Fall back to IsAuthenticated: True for authenticated users, else False
        parent_permission = super(CanManageBookAudio, self).has_permission(request, view)
        if not parent_permission:
            return False
        return True
class CanManageBookPdf(permissions.IsAuthenticated):
    """
    Permission class for book-PDF endpoints.

    GET is always allowed and superusers bypass every check.  Other safe
    methods require ``view_book_pdf``; POST, PUT/PATCH and DELETE require
    ``create_book_pdf``, ``edit_book_pdf`` and ``delete_book_pdf``
    respectively.  Any other request is denied.
    """
    # book_lookup = 'parent_lookup_book' case of parent child
    def has_permission(self, request, view):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        # Allow list to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        if request.method in permissions.SAFE_METHODS:
            return has_permission(request.user, AppPermissions.view_book_pdf)
        # 'POST' method creation
        if request.method == 'POST':
            return has_permission(request.user, AppPermissions.create_book_pdf)
        # 'PUT/PATCH' method update
        if request.method in ['PUT', 'PATCH']:
            return has_permission(request.user, AppPermissions.edit_book_pdf)
        # Deleting PDFs
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_book_pdf):
            return True
        # NOTE(review): dead code — both branches below return False, so the
        # super() result is computed but never used.  Kept verbatim.
        parent_permission = super(CanManageBookPdf, self).has_permission(request, view)
        if not parent_permission:
            return False
        return False
    def has_object_permission(self, request, view, obj):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        """
        Manages only permissions for editing and deleting the objects
        """
        # (the string above follows the import, so it is a no-op expression
        # statement, not a real docstring)
        # Allow get to all
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        # 'PUT'/'PATCH': allow when the user may edit PDFs
        if request.method in ['PUT', 'PATCH'] and has_permission(request.user, AppPermissions.edit_book_pdf):
            return True
        # Any remaining safe method is allowed on a single object
        if request.method in permissions.SAFE_METHODS:
            return True
        # DELETE: allow when the user may delete PDFs
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_book_pdf):
            return True
        # Fall back to IsAuthenticated: True for authenticated users, else False
        parent_permission = super(CanManageBookPdf, self).has_permission(request, view)
        if not parent_permission:
            return False
        return True
class CanManageBookReview(permissions.IsAuthenticated):
    """
    Permission class for book-review endpoints.

    GET is always allowed and superusers bypass every check.  Other safe
    methods require ``view_book_review``; POST, PUT/PATCH and DELETE
    require ``create_book_review``, ``edit_book_review`` and
    ``delete_book_review`` respectively.  Any other request is denied.
    """
    # book_lookup = 'parent_lookup_book' case of parent child
    def has_permission(self, request, view):
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        # Listing is open to everyone.
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks.
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        if request.method in permissions.SAFE_METHODS:
            return has_permission(request.user, AppPermissions.view_book_review)
        # 'POST' method creation
        if request.method == 'POST':
            return has_permission(request.user, AppPermissions.create_book_review)
        # 'PUT/PATCH' method update
        if request.method in ['PUT', 'PATCH']:
            return has_permission(request.user, AppPermissions.edit_book_review)
        # BUGFIX: this branch previously checked AppPermissions.delete_book_rating
        # (copy-paste from CanManageBookRating); deleting a review must require
        # delete_book_review, matching has_object_permission below.
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_book_review):
            return True
        # Anything else is denied.  (The removed super() call here was dead
        # code: both of its branches returned False.)
        return False
    def has_object_permission(self, request, view, obj):
        """Object-level checks for editing and deleting reviews."""
        # Deferred import: avoids a circular import with the users app.
        from ..users.roles import AppPermissions
        # Reading a single review is open to everyone.
        if request.method in ['GET']:
            return True
        # Superusers bypass all role checks.
        if request.user.is_authenticated and request.user.is_superuser:
            return True
        # 'PUT'/'PATCH': allow when the user may edit reviews.
        if request.method in ['PUT', 'PATCH'] and has_permission(request.user, AppPermissions.edit_book_review):
            return True
        # Any remaining safe method is allowed on a single object.
        if request.method in permissions.SAFE_METHODS:
            return True
        # DELETE: allow when the user may delete reviews.
        if request.method == 'DELETE' and has_permission(request.user, AppPermissions.delete_book_review):
            return True
        # Fall back to the IsAuthenticated check (True for authenticated
        # users, False otherwise) — same effect as the original's
        # parent_permission branches.
        return bool(super(CanManageBookReview, self).has_permission(request, view))
class CanManageUserData(permissions.IsAuthenticated):
    """
    Permission class for user-data endpoints: any authenticated user may
    access and manage objects; everyone else is denied.
    """
    # book_lookup = 'parent_lookup_book' case of parent child
    def has_permission(self, request, view):
        # Any authenticated user is allowed (no role check here).
        if request.user.is_authenticated:
            return True
        # NOTE(review): dead code — both branches below return False, so the
        # super() result is computed but never used.  Kept verbatim.
        parent_permission = super(CanManageUserData, self).has_permission(request, view)
        if not parent_permission:
            return False
        return False
    def has_object_permission(self, request, view, obj):
        """
        Object-level check: any authenticated user is allowed.
        """
        # Any authenticated user is allowed (no role check here).
        if request.user.is_authenticated:
            return True
        # Fall back to IsAuthenticated: True for authenticated users, else False
        parent_permission = super(CanManageUserData, self).has_permission(request, view)
        if not parent_permission:
            return False
        return True
| 33.358321
| 115
| 0.651191
| 2,509
| 22,250
| 5.643683
| 0.041849
| 0.062288
| 0.082627
| 0.064831
| 0.969633
| 0.969633
| 0.969633
| 0.968856
| 0.958898
| 0.955155
| 0
| 0.000062
| 0.278337
| 22,250
| 666
| 116
| 33.408408
| 0.881796
| 0.165169
| 0
| 0.853211
| 0
| 0
| 0.018314
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061162
| false
| 0
| 0.06422
| 0
| 0.577982
| 0.003058
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
565ec712da897a03d3e2e74e541d76385de65132
| 1,169
|
py
|
Python
|
tests/test_index.py
|
sgowdaks/mtdata
|
3db2b9cf035fe1b51fd59658d58fa8ce037a47d9
|
[
"Apache-2.0"
] | 81
|
2020-04-07T02:55:39.000Z
|
2022-03-30T05:28:58.000Z
|
tests/test_index.py
|
sgowdaks/mtdata
|
3db2b9cf035fe1b51fd59658d58fa8ce037a47d9
|
[
"Apache-2.0"
] | 77
|
2020-04-07T19:53:48.000Z
|
2022-03-22T18:41:08.000Z
|
tests/test_index.py
|
sgowdaks/mtdata
|
3db2b9cf035fe1b51fd59658d58fa8ce037a47d9
|
[
"Apache-2.0"
] | 6
|
2020-04-16T22:21:19.000Z
|
2022-02-07T20:52:15.000Z
|
#!/usr/bin/env python
#
#
# Author: Thamme Gowda
# Created: 10/12/21
from mtdata.index import is_compatible, bcp47
def test_is_compatible():
    """Table-driven check of BCP-47 tag compatibility across region/script variants."""
    cases = [
        (True, 'en', 'en'),
        (True, 'en', 'en_US'),
        (True, 'en_US', 'en'),
        (False, 'en_US', 'en_GB'),
        (False, 'en_US', 'en_IN'),
        (True, 'en_US', 'en_Latn'),
        (True, 'en_US', 'en_Latn_US'),
        (True, 'por_BR', 'por'),
        (True, 'por_PT', 'por'),
        (False, 'por_PT', 'por_BR'),
        (True, 'kan', 'kan_IN'),
        (True, 'kan', 'kan_Knda'),
        (True, 'kan', 'kan_Knda_IN'),
        (False, 'kan', 'kan_Deva_IN'),
        (False, 'hin_Deva_In', 'kan_Deva_IN'),
        (False, 'hin_In', 'kan_Deva_IN'),
        (False, 'hin', 'kan_Deva_IN'),
    ]
    # Same pairs, same order as the original assert list.
    for expected, lhs, rhs in cases:
        assert bool(is_compatible(bcp47(lhs), bcp47(rhs))) is expected
| 38.966667
| 72
| 0.697177
| 178
| 1,169
| 4.297753
| 0.191011
| 0.298039
| 0.4
| 0.300654
| 0.824837
| 0.760784
| 0.720261
| 0.513725
| 0.369935
| 0.173856
| 0
| 0.07451
| 0.127459
| 1,169
| 29
| 73
| 40.310345
| 0.67549
| 0.05047
| 0
| 0
| 0
| 0
| 0.177376
| 0
| 0
| 0
| 0
| 0
| 0.894737
| 1
| 0.052632
| true
| 0
| 0.052632
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
565f0384e48199694bbe9a81f4dccb107136b8b4
| 1,359
|
py
|
Python
|
tests/test_log_requests.py
|
ex-tipsi/pytest-tipsi-testing-backup
|
26d73933f477571de4ef739a5f3b7d461e49832c
|
[
"MIT"
] | 2
|
2018-01-25T02:46:07.000Z
|
2019-04-15T18:27:15.000Z
|
tests/test_log_requests.py
|
ex-tipsi/pytest-tipsi-testing-backup
|
26d73933f477571de4ef739a5f3b7d461e49832c
|
[
"MIT"
] | 1
|
2019-10-09T13:13:42.000Z
|
2019-10-09T13:13:42.000Z
|
tests/test_log_requests.py
|
ex-tipsi/pytest-tipsi-testing-backup
|
26d73933f477571de4ef739a5f3b7d461e49832c
|
[
"MIT"
] | 2
|
2019-10-09T08:15:22.000Z
|
2019-10-14T14:32:30.000Z
|
import os
import json
from unittest.mock import patch
import requests
def test_docme(tmpdir, log_requests):
    """A single recorded GET produces a one-entry JSON documentation file."""
    with patch.dict('os.environ', {'DOCS_ROOT': tmpdir.strpath}):
        with log_requests('out'):
            resp = requests.get('http://echo.jsontest.com/key/value/one/two')
            assert resp.status_code == 200, resp
        # The recorder writes <DOCS_ROOT>/<module>.<name>.json on context exit.
        logged = tmpdir.join('tests.test_log_requests.out.json')
        assert os.path.exists(logged.strpath)
        entries = json.loads(logged.read())
        assert len(entries) == 1, entries
        first = entries[0]
        assert first['method'] == 'get', first
        assert first['response'] == "{'key': 'value', 'one': 'two'}", first
def test_docme_second(tmpdir, log_requests):
    """Recording the same request twice still yields a single-entry JSON file."""
    with patch.dict('os.environ', {'DOCS_ROOT': tmpdir.strpath}):
        # Enter log_requests('out') twice with an identical GET each time.
        for _ in range(2):
            with log_requests('out'):
                resp = requests.get('http://echo.jsontest.com/key/value/one/two')
                assert resp.status_code == 200, resp
        logged = tmpdir.join('tests.test_log_requests.out.json')
        assert os.path.exists(logged.strpath)
        entries = json.loads(logged.read())
        assert len(entries) == 1, entries
        first = entries[0]
        assert first['method'] == 'get', first
        assert first['response'] == "{'key': 'value', 'one': 'two'}", first
| 33.975
| 74
| 0.62546
| 187
| 1,359
| 4.454545
| 0.256684
| 0.092437
| 0.084034
| 0.084034
| 0.893157
| 0.893157
| 0.893157
| 0.893157
| 0.893157
| 0.893157
| 0
| 0.012048
| 0.206034
| 1,359
| 39
| 75
| 34.846154
| 0.759963
| 0
| 0
| 0.806452
| 0
| 0
| 0.243561
| 0.047093
| 0
| 0
| 0
| 0
| 0.354839
| 1
| 0.064516
| false
| 0
| 0.129032
| 0
| 0.193548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
569bb446e1bb2fc396db2d44db8bd823b9ea83e4
| 85
|
py
|
Python
|
ips/ip/ir_recieve/__init__.py
|
zld012739/zldrepository
|
5635b78a168956091676ef4dd99fa564be0e5ba0
|
[
"MIT"
] | null | null | null |
ips/ip/ir_recieve/__init__.py
|
zld012739/zldrepository
|
5635b78a168956091676ef4dd99fa564be0e5ba0
|
[
"MIT"
] | null | null | null |
ips/ip/ir_recieve/__init__.py
|
zld012739/zldrepository
|
5635b78a168956091676ef4dd99fa564be0e5ba0
|
[
"MIT"
] | null | null | null |
from ir_recieve_partial import get_ip_name
from ir_recieve_partial import IR_RECIEVE
| 28.333333
| 42
| 0.905882
| 15
| 85
| 4.666667
| 0.533333
| 0.385714
| 0.371429
| 0.571429
| 0.742857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094118
| 85
| 2
| 43
| 42.5
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
3b061aba176bee9c7b313a54d7ecce529a4384a0
| 6,656
|
py
|
Python
|
decode_exchange_string.py
|
selplacei/factorio-stuff
|
c809bcd0312d2568390104e391abbd18a4f03e75
|
[
"Unlicense"
] | null | null | null |
decode_exchange_string.py
|
selplacei/factorio-stuff
|
c809bcd0312d2568390104e391abbd18a4f03e75
|
[
"Unlicense"
] | null | null | null |
decode_exchange_string.py
|
selplacei/factorio-stuff
|
c809bcd0312d2568390104e391abbd18a4f03e75
|
[
"Unlicense"
] | null | null | null |
import blueprint
import json
bp = blueprint.Blueprint.from_exchange_string('0eNrt3dmS47iVBuB3yevixAHAte5m8Sz2bPaMZ3N0dFASJHGSImUulZ3u6Hc3yKxFIgkSODjKsXtw4ajocupPkoJ+giD11Y9Pu7KX16aouqePPz4V+7pqnz7+7sentjhVeTn8Xfd6lU8fn4pOXp4+PFX5Zfivtt+1Xd4VdfX004enojrIH54+sp+++/Akq67oCvmWMv7H6/dVf9nJ5ukjfH398Hu6vOqCfX3ZFVXe1Y0Kv9ZtMWaqX6vy4C+iD0+v45/ql6iXdE1dfr+T5/xToX5e/dCxKDvZGG3xS10fZBXsz7Ltnsa4fthnuNn6D5shRVOvRXCDiLaTslzJEEYZdZOfZKCO4PNiSGgQ0jV51V7rpgt2slzelsgg5pi3XWCQFRtkyR+ujWxbk7jEIK5XP9qcGvXSgz4oNd1Ho7TMYi+NAhmYjIdrWXTqg7CcwEz3cD2GW+zaepLJEN/1TSWboKha2WiDTIb5eoLJCC/r6hScc/WKw8b2xKZHej3GZGy/Nd9GUPrTdz8NWZMaZnY1zChrWP26/fPGZhs14BBjchCMOvmSl2UgS7nvmmIfXOtSorv5Ig9FfzEIMxm8u+JkkGQyiO9P2Kh2vhZXie7i4cVBVwdvfYdu4mt/uaKLt8mLEl+y6qRUVIE69V7xNTtsQfD5Z9AlO4bsz+PWrEQJo2pTH/y6Kz5JfMHu8+ZUBy/5STO0jDr2WPbFYS3EZIDmTVeUpWxe14ISs12yKlRuV6icslC1c0CjGr0Wag6ifn2F786vEUEj1VjCN2dZn4q2U0XX1Lu6Q3fm+BY0/X444CtRkc0WjdP1IN8Pn5Tg2tSfhn1GF+kk9pq37WZuYp/7+SoB3bWTuF1/PGo2LrNPa+Tve/WnbgpgUsjDeztcJODr+O28X+aac4pZG6tZ4UvRSHwJnxqpLkz1GaFR9RXd+SLH4/utedB1fJD7YSRuZsWmn8dJJaKb+Vq/qM1qX4puf7aqaGFX0YKyotUH+9Tkl0u+K6W6NJL5s9PMt65ksFMTwmd0Z6v92TfSoarP+R/y5hCs5oRGH55jUcmNoMgiyGTDjCpaXe6pd7hEd/G+LI7HQF0Pl/VQ7i26hRt5zYtGnSQ077dJ935b40OXrWqE2/PqVR2gymXBob+qj8RBrgdxm30LdnX97LDgUBelyzqDOpfl6rKvOqlxiC/eti7z4c2uZOkwD97v+0tfujVt1e9LqbalkWraU9vNh0O7sg0py/YsczWl0l2qmgzNMUH+oK6wqpPLosI4ILq+2elGhMU62KWoiuoUHBpdJ5kM0a8rCZtpJkO1Ph7bc92oj7Dumjw2vKD/X123Jcbnw6M6TPkeP9d9W5VfS8lsDvBakFHdqksCedmVw7t0yfdnNYIChi/bhTSOb9yFNIHvXVW7wdvZu3l1WIQ4y0uxz8vxfNLh+3evmqwpjr3msomZzR12+NrdqbrVLV1o2jaya9uIdDn3KtWs61If+lI6rEJ8C9EMTG4bI9CNK9Vl7nChrw7x2o6FqCjN7kW4MIGv3UYlGOxlgsvS7GaKTBPoRh7u+uJbeF/rFleNr9TwNTvea65dFhb29fWq5g61y8JC3+TVcD9DG2J0dZa/BMeiPeM7dTwYqts1l3Rmq7pvh2MlRVOwsV3BxqQFW5fFQc0ndNcDxnMb/W6bDEb16nHxbqdZGTd6aqEv1bwIfy8sH+4ov+L7c/1a3Ggpq+nVJeswY1HHoZH41QF1ifHpdSvIaI22OJ27rSCj5dl+WFIahvZKkFFpXuUw3qXqjFPerqYx40GjRl6+V5+DtTSTYfySD/eL11IsenU/rOc5PJgwVNrwoXrGF+uYcRou01/O0mXJQF6u6nS7dmAS4+ufuhpWoYtm3xedVc8mdj2bUPZsfviUV/thKXJtw83mL3vZtsPVUV8V+OfE3laS9BnC5np0Kyw0ukX7OuzUeEctODbD
X6IfRlAfw7IsHNZn1UF+lsPwGN90/Y6Z3TZ7CQ6yaocJ59sKZ9/gVxI+b5n2jJ1ZrL3pT/tgMXXjInJYp/0akuJr90vIsD/BXpYO3du36jPaX4O1SGZ576zv6sv4aEzQ7gupWmC62s5sHxj7fKtzO85krF8K9VHJm1ddnO00+MtSyXacpqdTu55Oie+lHT7fg9jefpPx1A1H1+DYmq1A5CpiM0oYPS+lLh/xS71tv/uyNnfqK3xRn+tO93qze78XNW0P1mIS804t875So7fBP8dbqj+7czPcRMbPlPPqMKygO6zuHotG1fsluOSn/A/aJKPZRiGb/dtpua8O7UaiTVcbBQrzMaQGgbb4Q5s91eVZlrS61qnq1ZTY5qIy2M5LLI7+dpqmmzO7bs4ou/nbwdjYDWFV0d8e69PncOMWWXy5sHqrZ0HMqpvVO3EZllLU27KYEVk2WZBfLjW6ok+NrPKDdHjQoR8e4wrWcozWfeuiHb5ck19b3WK0STu3ajZ/qF/WgwyfeDjKanwIai3JZAAf1Lm8Ge+ir2dxo61SWfXr1mYJ47UjdR6qHR57eFvJWgkxGc3jMr+63lmJiY0fDlsJSexCgsszt/tiheUX3BgQLxV/eXYkkL/vi+tFVvh1jGPfjg/Qvj3+sRHIzddvN5KERZJ6ezbSjNaVZdkFxeUyrCFsbZ3ZOnPdPstSduroracZjWn15tfqB4IvT7tuZCaozO0jaVLh1Vgon4px4KzHGT1NUcnmNJz0C1ketkY0WAdu77PZDPzLwSzzVhXHeNZo5VYyNztt7M95c5KmoQIVuvLUvuGDb8MDOC+5bnJv8pk5aW/JGU1h1NVA1zeN7PC1//buraXoOt/223SkX6e7mweubb3VPHsliNsFDSOtG69P3b4b0uSHHH/r8PP6QVuUtUWtj99Jmtw7sujxycu/3ju0qO1JxPhEoUU7T17+9aajRSNPIr7ebrQp4UnG3f1Bm96dHo3bO4M2LTvdpy93Pm1K9VPRdL36m29bM/5EADY9qgthNkWqC+E2faoLETaFqgsJ7frU8st0zPHbdLqtjix6VJcRW1SoLiOx6ExdRmrRnLqMzKI5dRl/aVGfuoy/suhPXcZfW5SoLuNvLFpUl/ELmxrVhfytTYfqQv7OpkB1IX9P0aD/QNGgv6Ro0F9RNOg/UjToP9k1qOV33Zh4TIP+M0GD/gtBg/4rQYP+mqBBf0PQoP9G0KD/TtCgvyVo0P8gaND/pGjQ/6Jo0P+maND/oWjQRh4oOnT8WjJFjw7f0KOo0ldZlvULRZ9ei8ruiQdm+Y02Fj6mUveveUXQqi/nwupZ4JUh8kpQrrtS96REqHk7Uhwkxzwk5yE5D8l5SM5DcjSQXIaT5JiX5Lwk5yU5L8l5SW4qyQGOkmOekvOUnKfkPCXnKbmHU3Kc4Sw55i05b8l5S85bct6Ss2lbjsPkmMfkPCbnMTmPyXlMzqpuBU6TY16T85qc1+S8Juc1ua2GDXGcHPvZc3IsStxAudsAHCnHRCjcVbm7FEdYLuVRSGXL3WU58nJ3WY7CHOMhqTF3l0egzMVRkoYk0twkyUGbEyKKwjiKCdC5eZSbPSeE2kmepiQE3TzMWaKbR+qqOsKJdOzBIl2cxELA7Q44wXSzOLxPF0IYs4wDqVM3D3X26lRkmKYq1Nmtmye5+3XzTArHLhJpAlHGKTy7eZaDa8cgiaIkTgWnAO6W0vDSXcBZGMVhGoeE5t1iKJl+FyVcJZMbeNNYEglvGvpn6eHxGAfisfcB8eKEqXK/PcruLt4s0/N4f4o8Hr+Z1DkAebcxWCIvYoIMybvLImLyhk5OwpQzyKjBvMVoFzpvMZAE0VPJnAuWpJyG01vMI4H1gpAlScYSUl9vHqqr/ATn7LF3dfYCkQBPUkbE7c3iUOreLMUR3wvURBmyJCIw+GZRrhRfCAIiESXMkeSb57jRfCxKIQojGqBvGubI9KmpOyOT+u7DnLG++zhXry/J
REJB9t3n4NU+xqM0VJd1NHjfPA1v+M2z3Ci/eZ6u51Ok6MfeQ/Sb7wYN7Pc5NyHz/WaBFMwfj1icxCwl1v5msRTo3xiaROwB9t8smpYAHOJjNdmklQBnqTQg4CyW0AX8nB0+jAec/wI6JTBKE86Y7a0HAyxwHow2A9+iIu5EB85D0ILgPMoBEoxACA63a5K6M06G9ATZ4z3B+U64sYKMqeEOEeP0vOA8GsMMDstcasoXE2iDsygUOhiyLIRs82aDgT04S0IRhLMUtETII5HdfNbQHOEkx80kjNl23xuyhPdRaJnw7q6QA054n4P3Ce9z8EThfQ5eKbzPwUOFPIySeLOsBSCxQvY4rHCy5WiwkMUiDQnQwvscPFx4n4PHC+9z8IDhfQ4eMQyz2zkGHjK8z8Fjhvc5eNDwPscBNRQ8iVMK2HAShMcNJ0EeOHxf4FAwJHDIPHDogUMPHHrg0AOH00rlSOCQeeDwAcChiHHAIffAoQcOPXDogUMPHJIAhyLBAYfcA4ceOPTAoQcOPXA4bdQUBxxyDxx64NADhx449MDhw4FDkeGAQ+6BQw8ceuDQA4ceOLRo2xBwwCH3wKEHDj1waAAcMp6SEod3eQTI4V3ez4Q5vP3KMAI6vHs5ijqMecZiyLaf4QsZTjzkDxQP5xvvAh8ywRlXeUQA4jzOGUKcRxKAiIwnmeDh5iPXVjDiPNQdSGQsA5FkESeFEuepFGBixFUo5zx0gRPnIRhAcSEFAylGKkV91LLMGVRcSELDigxCUKMsyVIKYXEpDUktMoA4EUzY8l6L5uJSmAO+yIEJ1SThrXWia32OUxj5wxTGpY1Hc4wLYXiXUYWFaaL+l7rxjAs5OKVx9C84RLHg7ljjUpij2RgwSEQi7kw9R7pxKdJRcFyKdIUcIYlECpzac1yIJWAdWayCk/ju6xIOtuNSnAPwqPY4gkwwTiA8LmS5EY+ZyKI0u/3IuBiPC2nOyONCpu5UIHDKI3+w8hgmqsPDjHEi5nGeh3ce4yQCDikp8zjLdFYeRRKmcUSAPM6C3I3HWSQF8RjHaRgnJMLjLMoBeORqkpfGaUrBO86z8LjjjB90hR1ngVSoI4/53fdHaUzHaSoF6TjNpBAdp5kUrOM0U3dyCHG2I38f21FkapYfU8qOk0Rn15FlcQypG+44yXASHidZOOZxEuJkPU6y3MBHoOAewRl7BDLqEcihR6DmHYEGdQR6ynFAIkUcplSU4zyPhHK8/yY1heM4SdR1e4RDHPn7Io7j+kQCd0C/m+O4kIijHBeCXDVHdQ3NkpiFln2vAR0X0lxNx2GnRSSS22+341THpSQ311HtbxylCQgR09iOS4GOvqOKjFSPsISnZMzjYqaz9riY6oo+qtCQ81QApBT242IcnoAMwkRkUXjrFDkhkEt5eAZSpaUszFLb2wV6CHIpUXe2iJEUJH8PCnJpR2gwyGDgBWPBgUyDnCdScJBvqbf/7AONBznPpQAhg/Ef3REQPkCEnGcTk5BMgDrph4ITo5DzXCIWch5MCENmcRin2e2/hUItQy78BjoaEshNSCDAIMFJgQQC/hFI3EeD5z8TJPjIHw8+ApX0CPTCIzjJjkBAOoKb5QjuiCO46Y3gzjaCu9gIRFgjEEmNQMA0AoXRCBRAI1DojEBBMwKFy2jQpynSZOSPMxmBwGMEAosRCBxGIDAYgcBfBAJ7EQjcRSAwF4HAWwQKaxEonEWgMBbB+4r/V75imCF9Re59Re8rel/R+4reV5xUagRIX5F7X/EBvmIU4nxF4X1F7yt6X9H7it5XJPEVowjnKwrvK3pf0fuK3lf0vuK0UWOcryi8r+h9Re8rel/R+4oP9xWjBOcrCu8rel/R+4reV/S+ok3bpjhfUXhf0fuK3lc08BWBVFcEUlsRfnayIri4iuCoKhrUbYbjFMUDOUWgcRSJ/ERCN5HUSwRKJxEIfUQgdRGB1EMEFwcRnPxDcHMPwdk7BALnECh4Q3BWDYEA
MwQSw3C7YWPA0YXiYXQhEIiFQAAVgptPCI4sIbhjhEBFEAIVPAhU3CBQIYNAagsCKSkIJJIgkACCQAAHAhUYCCRQIBACgQZFy3AwoHgwDAhEICAQQICkAiAh/QfO5h8QYn9AqvxR8H4krh9QgH5AIfkBneEH9HofUMN9QGv2AS3XB7RSn0FRcxzSJ94H6QNKnw/oaD5wU/mABuQDR4sPaBg+8AKfF/hoBT4gcfeAWNsDUmnPoJwFTtkT76rsAZGtB26iHlA5ekCg5wGZmQeOUh4Q+XhAo+IBlYUHZAIeELp3QKfdAYVxBxSyHdCAdkDh2AEVX2fQviFSrRPvodYBNVZHh9TR4nRAjNIBKUYHD0Do4EH4HNCac0BMzcEjhLnHyXJelPsZinJxhBTlhBflvCjnRTkvynlR7q5PY6QoJ7wo50U5L8p5Ue7/uygXJ0hRTnhRzotyXpTzopwX5aaVmiJFOeFFOWdR7rsPb5fzt99m/fD0SR288YDzdPin0rIkToZ/PkkNqS//1/e71274Leqa7I+uONt1')
for e in bp.data['blueprint']['entities']:
print(json.dumps(e, indent=4))
| 832
| 6,546
| 0.961839
| 229
| 6,656
| 27.947598
| 0.956332
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158957
| 0.002855
| 6,656
| 7
| 6,547
| 950.857143
| 0.805334
| 0
| 0
| 0
| 0
| 0.2
| 0.978666
| 0.976112
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0.8
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 9
|
3b91e6eaecf8f2c6bd3406c5523b4fe71a918c8f
| 145
|
py
|
Python
|
loldib/getratings/models/NA/na_riven/__init__.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_riven/__init__.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_riven/__init__.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from .na_riven_top import *
from .na_riven_jng import *
from .na_riven_mid import *
from .na_riven_bot import *
from .na_riven_sup import *
| 24.166667
| 28
| 0.758621
| 25
| 145
| 4
| 0.36
| 0.3
| 0.55
| 0.68
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 145
| 5
| 29
| 29
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
8e8411f03889d4695f356eaa7eac50a9c651cdc9
| 4,302
|
py
|
Python
|
server/rideshare/migrations/0035_auto_20170517_0555.py
|
aadabi/tagrides
|
980a6f7df62f1bb9b396eca79421388c36987f45
|
[
"Unlicense"
] | 2
|
2018-03-21T23:32:09.000Z
|
2018-03-22T01:39:51.000Z
|
server/rideshare/migrations/0035_auto_20170517_0555.py
|
aadabi/Cruzer
|
980a6f7df62f1bb9b396eca79421388c36987f45
|
[
"Unlicense"
] | null | null | null |
server/rideshare/migrations/0035_auto_20170517_0555.py
|
aadabi/Cruzer
|
980a6f7df62f1bb9b396eca79421388c36987f45
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-17 05:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rideshare', '0034_auto_20170516_0702'),
]
operations = [
migrations.AlterField(
model_name='driveractive',
name='driverod_departure_lat',
field=models.FloatField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='driveractive',
name='driverod_departure_lon',
field=models.FloatField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='driveractive',
name='driverod_destination_lat',
field=models.FloatField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='driveractive',
name='driverod_destination_lon',
field=models.FloatField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='plannedtrips',
name='driver_departure_latitude',
field=models.FloatField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='plannedtrips',
name='driver_departure_longitude',
field=models.FloatField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='plannedtrips',
name='driver_destination_latitude',
field=models.FloatField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='plannedtrips',
name='driver_destination_longitude',
field=models.FloatField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='plannedtrips',
name='driver_timeofdeparture_hour',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='plannedtrips',
name='driver_timeofdeparture_minute',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='proposedtrips',
name='rider_departure_latitude',
field=models.FloatField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='proposedtrips',
name='rider_departure_longitude',
field=models.FloatField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='proposedtrips',
name='rider_destination_latitude',
field=models.FloatField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='proposedtrips',
name='rider_destination_longitude',
field=models.FloatField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='proposedtrips',
name='rider_timeofdeparture_hour',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='proposedtrips',
name='rider_timeofdeparture_minute',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='rideractive',
name='riderod_departure_lat',
field=models.FloatField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='rideractive',
name='riderod_departure_lon',
field=models.FloatField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='rideractive',
name='riderod_destination_lat',
field=models.FloatField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='rideractive',
name='riderod_destination_lon',
field=models.FloatField(blank=True, max_length=200, null=True),
),
]
| 37.086207
| 75
| 0.603673
| 404
| 4,302
| 6.220297
| 0.14604
| 0.159172
| 0.198965
| 0.2308
| 0.923199
| 0.923199
| 0.923199
| 0.923199
| 0.902109
| 0.902109
| 0
| 0.026118
| 0.288006
| 4,302
| 115
| 76
| 37.408696
| 0.794319
| 0.015342
| 0
| 0.740741
| 1
| 0
| 0.182377
| 0.123081
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.018519
| 0
| 0.046296
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
d900d27237234bbe12d722aa8c9e77b4afd6f305
| 2,276
|
py
|
Python
|
test/test_bvr_hi.py
|
doedotdev/bvr
|
023fc93424fa6a50c8a3c2ce2fc48b76a041b58c
|
[
"MIT"
] | null | null | null |
test/test_bvr_hi.py
|
doedotdev/bvr
|
023fc93424fa6a50c8a3c2ce2fc48b76a041b58c
|
[
"MIT"
] | 12
|
2019-12-07T21:40:23.000Z
|
2019-12-07T21:43:54.000Z
|
test/test_bvr_hi.py
|
doedotdev/bvr
|
023fc93424fa6a50c8a3c2ce2fc48b76a041b58c
|
[
"MIT"
] | null | null | null |
import logging
from bvr.bvr_hi import bvr_hi
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def test_bvr_hi_called_as_decorator(caplog):
@bvr_hi
def hi():
return 2
return_value = hi()
assert return_value == 2
assert hi.__name__ == "hi" # Important for decorators to not override method name
assert len(caplog.records) == 2
assert caplog.records[0].msg == 'Hi'
assert caplog.records[0].levelname == 'INFO'
assert caplog.records[1].msg == 'Hi'
assert caplog.records[1].levelname == 'INFO'
def test_bvr_hi_called_as_callable_returning_decorator(caplog):
@bvr_hi()
def hi():
return 2
return_value = hi()
assert return_value == 2
assert hi.__name__ == "hi" # Important for decorators to not override method name
assert len(caplog.records) == 2
assert caplog.records[0].msg == 'Hi'
assert caplog.records[0].levelname == 'INFO'
assert caplog.records[1].msg == 'Hi'
assert caplog.records[1].levelname == 'INFO'
def test_bvr_hi_called_as_decorator_with_function_args(caplog):
@bvr_hi
def hi(msg):
logger.info(msg)
return msg
return_value = hi("Hello")
assert return_value == "Hello"
assert hi.__name__ == "hi" # Important for decorators to not override method name
assert len(caplog.records) == 3
assert caplog.records[0].msg == 'Hi'
assert caplog.records[0].levelname == 'INFO'
assert caplog.records[1].msg == 'Hello'
assert caplog.records[1].levelname == 'INFO'
assert caplog.records[2].msg == 'Hi'
assert caplog.records[2].levelname == 'INFO'
def test_bvr_hi_called_as_callable_returning_decorator_with_function_args(caplog):
@bvr_hi()
def hi(msg):
logger.info(msg)
return msg
return_value = hi("Hello")
assert return_value == "Hello"
assert hi.__name__ == "hi" # Important for decorators to not override method name
assert len(caplog.records) == 3
assert caplog.records[0].msg == 'Hi'
assert caplog.records[0].levelname == 'INFO'
assert caplog.records[1].msg == 'Hello'
assert caplog.records[1].levelname == 'INFO'
assert caplog.records[2].msg == 'Hi'
assert caplog.records[2].levelname == 'INFO'
| 26.776471
| 86
| 0.670475
| 310
| 2,276
| 4.716129
| 0.132258
| 0.213406
| 0.259918
| 0.109439
| 0.934337
| 0.934337
| 0.934337
| 0.920657
| 0.920657
| 0.920657
| 0
| 0.015504
| 0.206503
| 2,276
| 84
| 87
| 27.095238
| 0.79402
| 0.092707
| 0
| 0.862069
| 0
| 0
| 0.045631
| 0
| 0
| 0
| 0
| 0
| 0.551724
| 1
| 0.137931
| false
| 0
| 0.034483
| 0.034483
| 0.241379
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
d96d74a5691df6e353d73060064a6f5a63632727
| 25,737
|
py
|
Python
|
fhir/resources/tests/test_servicerequest.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
fhir/resources/tests/test_servicerequest.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
fhir/resources/tests/test_servicerequest.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/ServiceRequest
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import servicerequest
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class ServiceRequestTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
js = json.load(handle)
self.assertEqual("ServiceRequest", js["resourceType"])
return servicerequest.ServiceRequest(js)
def testServiceRequest1(self):
inst = self.instantiate_from("servicerequest-example2.json")
self.assertIsNotNone(inst, "Must have instantiated a ServiceRequest instance")
self.implServiceRequest1(inst)
js = inst.as_json()
self.assertEqual("ServiceRequest", js["resourceType"])
inst2 = servicerequest.ServiceRequest(js)
self.implServiceRequest1(inst2)
def implServiceRequest1(self, inst):
self.assertEqual(
force_bytes(inst.asNeededCodeableConcept.text),
force_bytes("as needed to clear mucus"),
)
self.assertEqual(inst.authoredOn.date, FHIRDate("2017-02-01T17:23:07Z").date)
self.assertEqual(inst.authoredOn.as_json(), "2017-02-01T17:23:07Z")
self.assertEqual(force_bytes(inst.code.coding[0].code), force_bytes("34431008"))
self.assertEqual(
force_bytes(inst.code.coding[0].display),
force_bytes("Physiotherapy of chest (regime/therapy) "),
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.contained[0].id), force_bytes("signature"))
self.assertEqual(
force_bytes(inst.contained[1].id), force_bytes("cystic-fibrosis")
)
self.assertEqual(force_bytes(inst.id), force_bytes("physiotherapy"))
self.assertEqual(
force_bytes(inst.identifier[0].system),
force_bytes("http://goodhealth.org/placer-ids"),
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].code), force_bytes("PLAC")
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].display),
force_bytes("Placer Identifier"),
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0203"),
)
self.assertEqual(
force_bytes(inst.identifier[0].type.text), force_bytes("Placer")
)
self.assertEqual(
force_bytes(inst.identifier[0].value), force_bytes("20170201-0001")
)
self.assertEqual(force_bytes(inst.intent), force_bytes("order"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(inst.occurrenceTiming.repeat.duration, 15)
self.assertEqual(inst.occurrenceTiming.repeat.durationMax, 25)
self.assertEqual(
force_bytes(inst.occurrenceTiming.repeat.durationUnit), force_bytes("min")
)
self.assertEqual(inst.occurrenceTiming.repeat.frequency, 1)
self.assertEqual(inst.occurrenceTiming.repeat.frequencyMax, 4)
self.assertEqual(inst.occurrenceTiming.repeat.period, 1)
self.assertEqual(
force_bytes(inst.occurrenceTiming.repeat.periodUnit), force_bytes("d")
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testServiceRequest2(self):
inst = self.instantiate_from("servicerequest-example3.json")
self.assertIsNotNone(inst, "Must have instantiated a ServiceRequest instance")
self.implServiceRequest2(inst)
js = inst.as_json()
self.assertEqual("ServiceRequest", js["resourceType"])
inst2 = servicerequest.ServiceRequest(js)
self.implServiceRequest2(inst2)
def implServiceRequest2(self, inst):
self.assertEqual(inst.authoredOn.date, FHIRDate("2017-02-01T17:23:07Z").date)
self.assertEqual(inst.authoredOn.as_json(), "2017-02-01T17:23:07Z")
self.assertEqual(
force_bytes(inst.code.coding[0].code), force_bytes("359962006")
)
self.assertEqual(
force_bytes(inst.code.coding[0].display),
force_bytes("Turning patient in bed (procedure)"),
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertTrue(inst.doNotPerform)
self.assertEqual(force_bytes(inst.id), force_bytes("do-not-turn"))
self.assertEqual(
force_bytes(inst.identifier[0].system),
force_bytes("http://goodhealth.org/placer-ids"),
)
self.assertEqual(
force_bytes(inst.identifier[0].value), force_bytes("20170201-0002")
)
self.assertEqual(force_bytes(inst.intent), force_bytes("order"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(force_bytes(inst.priority), force_bytes("stat"))
self.assertEqual(force_bytes(inst.status), force_bytes("active"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testServiceRequest3(self):
inst = self.instantiate_from("servicerequest-example-lipid.json")
self.assertIsNotNone(inst, "Must have instantiated a ServiceRequest instance")
self.implServiceRequest3(inst)
js = inst.as_json()
self.assertEqual("ServiceRequest", js["resourceType"])
inst2 = servicerequest.ServiceRequest(js)
self.implServiceRequest3(inst2)
def implServiceRequest3(self, inst):
self.assertEqual(force_bytes(inst.code.coding[0].code), force_bytes("LIPID"))
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://acme.org/tests"),
)
self.assertEqual(force_bytes(inst.code.text), force_bytes("Lipid Panel"))
self.assertEqual(force_bytes(inst.contained[0].id), force_bytes("fasting"))
self.assertEqual(force_bytes(inst.contained[1].id), force_bytes("serum"))
self.assertEqual(force_bytes(inst.id), force_bytes("lipid"))
self.assertEqual(
force_bytes(inst.identifier[0].system), force_bytes("urn:oid:1.3.4.5.6.7")
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].code), force_bytes("PLAC")
)
self.assertEqual(
force_bytes(inst.identifier[0].type.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0203"),
)
self.assertEqual(
force_bytes(inst.identifier[0].type.text), force_bytes("Placer")
)
self.assertEqual(
force_bytes(inst.identifier[0].value), force_bytes("2345234234234")
)
self.assertEqual(force_bytes(inst.intent), force_bytes("original-order"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(
force_bytes(inst.note[0].text), force_bytes("patient is afraid of needles")
)
self.assertEqual(
inst.occurrenceDateTime.date, FHIRDate("2013-05-02T16:16:00-07:00").date
)
self.assertEqual(inst.occurrenceDateTime.as_json(), "2013-05-02T16:16:00-07:00")
self.assertEqual(
force_bytes(inst.reasonCode[0].coding[0].code), force_bytes("V173")
)
self.assertEqual(
force_bytes(inst.reasonCode[0].coding[0].display),
force_bytes("Fam hx-ischem heart dis"),
)
self.assertEqual(
force_bytes(inst.reasonCode[0].coding[0].system),
force_bytes("http://hl7.org/fhir/sid/icd-9"),
)
self.assertEqual(force_bytes(inst.status), force_bytes("active"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testServiceRequest4(self):
inst = self.instantiate_from("servicerequest-example-colonoscopy-bx.json")
self.assertIsNotNone(inst, "Must have instantiated a ServiceRequest instance")
self.implServiceRequest4(inst)
js = inst.as_json()
self.assertEqual("ServiceRequest", js["resourceType"])
inst2 = servicerequest.ServiceRequest(js)
self.implServiceRequest4(inst2)
def implServiceRequest4(self, inst):
self.assertEqual(inst.authoredOn.date, FHIRDate("2017-03-05").date)
self.assertEqual(inst.authoredOn.as_json(), "2017-03-05")
self.assertEqual(force_bytes(inst.code.coding[0].code), force_bytes("76164006"))
self.assertEqual(
force_bytes(inst.code.coding[0].display),
force_bytes("Biopsy of colon (procedure)"),
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.code.text), force_bytes("Biopsy of colon"))
self.assertEqual(force_bytes(inst.id), force_bytes("colon-biopsy"))
self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("12345"))
self.assertEqual(force_bytes(inst.intent), force_bytes("order"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(
force_bytes(inst.requisition.system),
force_bytes("http://bumc.org/requisitions"),
)
self.assertEqual(force_bytes(inst.requisition.value), force_bytes("req12345"))
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testServiceRequest5(self):
inst = self.instantiate_from("servicerequest-example4.json")
self.assertIsNotNone(inst, "Must have instantiated a ServiceRequest instance")
self.implServiceRequest5(inst)
js = inst.as_json()
self.assertEqual("ServiceRequest", js["resourceType"])
inst2 = servicerequest.ServiceRequest(js)
self.implServiceRequest5(inst2)
def implServiceRequest5(self, inst):
self.assertEqual(
force_bytes(inst.code.coding[0].code), force_bytes("229115003")
)
self.assertEqual(
force_bytes(inst.code.coding[0].display),
force_bytes("Bench Press (regime/therapy) "),
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("benchpress"))
self.assertEqual(force_bytes(inst.intent), force_bytes("plan"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(inst.occurrenceTiming.repeat.count, 20)
self.assertEqual(inst.occurrenceTiming.repeat.countMax, 30)
self.assertEqual(inst.occurrenceTiming.repeat.frequency, 3)
self.assertEqual(inst.occurrenceTiming.repeat.period, 1)
self.assertEqual(
force_bytes(inst.occurrenceTiming.repeat.periodUnit), force_bytes("wk")
)
self.assertEqual(
force_bytes(inst.patientInstruction),
force_bytes(
"Start with 30kg 10-15 repetitions for three sets and increase in increments of 5kg when you feel ready"
),
)
self.assertEqual(force_bytes(inst.status), force_bytes("active"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testServiceRequest6(self):
inst = self.instantiate_from("servicerequest-example-edu.json")
self.assertIsNotNone(inst, "Must have instantiated a ServiceRequest instance")
self.implServiceRequest6(inst)
js = inst.as_json()
self.assertEqual("ServiceRequest", js["resourceType"])
inst2 = servicerequest.ServiceRequest(js)
self.implServiceRequest6(inst2)
def implServiceRequest6(self, inst):
self.assertEqual(inst.authoredOn.date, FHIRDate("2016-08-16").date)
self.assertEqual(inst.authoredOn.as_json(), "2016-08-16")
self.assertEqual(
force_bytes(inst.category[0].coding[0].code), force_bytes("311401005")
)
self.assertEqual(
force_bytes(inst.category[0].coding[0].display),
force_bytes("Patient education (procedure)"),
)
self.assertEqual(
force_bytes(inst.category[0].coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.category[0].text), force_bytes("Education"))
self.assertEqual(force_bytes(inst.code.coding[0].code), force_bytes("48023004"))
self.assertEqual(
force_bytes(inst.code.coding[0].display),
force_bytes("Breast self-examination technique education (procedure)"),
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.code.text),
force_bytes("Health education - breast examination"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("education"))
self.assertEqual(force_bytes(inst.intent), force_bytes("order"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(inst.occurrenceDateTime.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.occurrenceDateTime.as_json(), "2014-08-16")
self.assertEqual(
force_bytes(inst.reasonCode[0].text),
force_bytes("early detection of breast mass"),
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testServiceRequest7(self):
inst = self.instantiate_from("servicerequest-example-ventilation.json")
self.assertIsNotNone(inst, "Must have instantiated a ServiceRequest instance")
self.implServiceRequest7(inst)
js = inst.as_json()
self.assertEqual("ServiceRequest", js["resourceType"])
inst2 = servicerequest.ServiceRequest(js)
self.implServiceRequest7(inst2)
def implServiceRequest7(self, inst):
self.assertEqual(inst.authoredOn.date, FHIRDate("2018-02-20").date)
self.assertEqual(inst.authoredOn.as_json(), "2018-02-20")
self.assertEqual(force_bytes(inst.code.coding[0].code), force_bytes("40617009"))
self.assertEqual(
force_bytes(inst.code.coding[0].display),
force_bytes("Artificial respiration (procedure)"),
)
self.assertEqual(
force_bytes(inst.code.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.code.text), force_bytes("Mechanical Ventilation")
)
self.assertEqual(force_bytes(inst.id), force_bytes("vent"))
self.assertEqual(force_bytes(inst.intent), force_bytes("order"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(
force_bytes(inst.orderDetail[0].coding[0].code), force_bytes("243144002")
)
self.assertEqual(
force_bytes(inst.orderDetail[0].coding[0].display),
force_bytes("Patient triggered inspiratory assistance (procedure)"),
)
self.assertEqual(
force_bytes(inst.orderDetail[0].coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.orderDetail[0].text), force_bytes("IPPB"))
self.assertEqual(
force_bytes(inst.orderDetail[1].text),
force_bytes(
" Initial Settings : Sens: -1 cm H20 Pressure 15 cm H2O moderate flow: Monitor VS every 15 minutes x 4 at the start of mechanical ventilation, then routine for unit OR every 5 hr"
),
)
self.assertEqual(
force_bytes(inst.reasonCode[0].text),
force_bytes("chronic obstructive lung disease (COLD)"),
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testServiceRequest8(self):
inst = self.instantiate_from("servicerequest-example-ambulation.json")
self.assertIsNotNone(inst, "Must have instantiated a ServiceRequest instance")
self.implServiceRequest8(inst)
js = inst.as_json()
self.assertEqual("ServiceRequest", js["resourceType"])
inst2 = servicerequest.ServiceRequest(js)
self.implServiceRequest8(inst2)
def implServiceRequest8(self, inst):
    """Assert every field value of the 'ambulation' ServiceRequest example."""
    self.assertEqual(inst.authoredOn.date, FHIRDate("2017-03-05").date)
    self.assertEqual(inst.authoredOn.as_json(), "2017-03-05")
    # Procedure coding (SNOMED CT).
    self.assertEqual(force_bytes(inst.code.coding[0].code), force_bytes("62013009"))
    self.assertEqual(
        force_bytes(inst.code.coding[0].display),
        force_bytes("Ambulating patient (procedure)"),
    )
    self.assertEqual(
        force_bytes(inst.code.coding[0].system),
        force_bytes("http://snomed.info/sct"),
    )
    self.assertEqual(force_bytes(inst.code.text), force_bytes("Ambulation"))
    self.assertEqual(force_bytes(inst.id), force_bytes("ambulation"))
    self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("45678"))
    self.assertEqual(force_bytes(inst.intent), force_bytes("order"))
    # Meta tag marking this resource as test data.
    self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
    self.assertEqual(
        force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
    )
    self.assertEqual(
        force_bytes(inst.meta.tag[0].system),
        force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
    )
    self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
    self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testServiceRequest9(self):
    """Round-trip the physical-therapy example through parse -> JSON -> parse."""
    parsed = self.instantiate_from("servicerequest-example-pt.json")
    self.assertIsNotNone(parsed, "Must have instantiated a ServiceRequest instance")
    self.implServiceRequest9(parsed)
    as_dict = parsed.as_json()
    self.assertEqual("ServiceRequest", as_dict["resourceType"])
    rehydrated = servicerequest.ServiceRequest(as_dict)
    self.implServiceRequest9(rehydrated)
def implServiceRequest9(self, inst):
    """Assert every field value of the 'physical-therapy' ServiceRequest example."""
    self.assertEqual(inst.authoredOn.date, FHIRDate("2016-09-20").date)
    self.assertEqual(inst.authoredOn.as_json(), "2016-09-20")
    # Body site coding (SNOMED CT).
    self.assertEqual(
        force_bytes(inst.bodySite[0].coding[0].code), force_bytes("36701003")
    )
    self.assertEqual(
        force_bytes(inst.bodySite[0].coding[0].display),
        force_bytes("Both knees (body structure)"),
    )
    self.assertEqual(
        force_bytes(inst.bodySite[0].coding[0].system),
        force_bytes("http://snomed.info/sct"),
    )
    self.assertEqual(force_bytes(inst.bodySite[0].text), force_bytes("Both knees"))
    # Category coding (SNOMED CT).
    self.assertEqual(
        force_bytes(inst.category[0].coding[0].code), force_bytes("386053000")
    )
    self.assertEqual(
        force_bytes(inst.category[0].coding[0].display),
        force_bytes("Evaluation procedure (procedure)"),
    )
    self.assertEqual(
        force_bytes(inst.category[0].coding[0].system),
        force_bytes("http://snomed.info/sct"),
    )
    self.assertEqual(force_bytes(inst.category[0].text), force_bytes("Evaluation"))
    # Requested procedure coding (SNOMED CT).
    self.assertEqual(
        force_bytes(inst.code.coding[0].code), force_bytes("710830005")
    )
    self.assertEqual(
        force_bytes(inst.code.coding[0].display),
        force_bytes("Assessment of passive range of motion (procedure)"),
    )
    self.assertEqual(
        force_bytes(inst.code.coding[0].system),
        force_bytes("http://snomed.info/sct"),
    )
    self.assertEqual(
        force_bytes(inst.code.text),
        force_bytes("Assessment of passive range of motion"),
    )
    self.assertEqual(force_bytes(inst.id), force_bytes("physical-therapy"))
    self.assertEqual(force_bytes(inst.intent), force_bytes("order"))
    # Meta tag marking this resource as test data.
    self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
    self.assertEqual(
        force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
    )
    self.assertEqual(
        force_bytes(inst.meta.tag[0].system),
        force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
    )
    self.assertEqual(inst.occurrenceDateTime.date, FHIRDate("2016-09-27").date)
    self.assertEqual(inst.occurrenceDateTime.as_json(), "2016-09-27")
    self.assertEqual(
        force_bytes(inst.reasonCode[0].text),
        force_bytes("assessment of mobility limitations due to osteoarthritis"),
    )
    self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
    self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testServiceRequest10(self):
    """Round-trip the diagnostic-imaging example through parse -> JSON -> parse."""
    parsed = self.instantiate_from("servicerequest-example-di.json")
    self.assertIsNotNone(parsed, "Must have instantiated a ServiceRequest instance")
    self.implServiceRequest10(parsed)
    as_dict = parsed.as_json()
    self.assertEqual("ServiceRequest", as_dict["resourceType"])
    rehydrated = servicerequest.ServiceRequest(as_dict)
    self.implServiceRequest10(rehydrated)
def implServiceRequest10(self, inst):
    """Assert every field value of the 'di' (Chest CT) ServiceRequest example."""
    # Requested procedure coding (LOINC).
    self.assertEqual(force_bytes(inst.code.coding[0].code), force_bytes("24627-2"))
    self.assertEqual(
        force_bytes(inst.code.coding[0].system), force_bytes("http://loinc.org")
    )
    self.assertEqual(force_bytes(inst.code.text), force_bytes("Chest CT"))
    self.assertEqual(force_bytes(inst.id), force_bytes("di"))
    self.assertEqual(force_bytes(inst.intent), force_bytes("original-order"))
    # Meta tag marking this resource as test data.
    self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
    self.assertEqual(
        force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
    )
    self.assertEqual(
        force_bytes(inst.meta.tag[0].system),
        force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
    )
    # Occurrence carries a full timestamp with timezone offset.
    self.assertEqual(
        inst.occurrenceDateTime.date, FHIRDate("2013-05-08T09:33:27+07:00").date
    )
    self.assertEqual(inst.occurrenceDateTime.as_json(), "2013-05-08T09:33:27+07:00")
    self.assertEqual(
        force_bytes(inst.reasonCode[0].text),
        force_bytes("Check for metastatic disease"),
    )
    self.assertEqual(force_bytes(inst.status), force_bytes("active"))
    self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
| 45.231986
| 196
| 0.64689
| 2,877
| 25,737
| 5.665276
| 0.117136
| 0.193263
| 0.19265
| 0.240812
| 0.829253
| 0.813915
| 0.788269
| 0.748267
| 0.700595
| 0.664335
| 0
| 0.03476
| 0.220888
| 25,737
| 568
| 197
| 45.31162
| 0.778077
| 0.006916
| 0
| 0.499048
| 0
| 0.001905
| 0.172126
| 0.017534
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.04
| false
| 0.00381
| 0.015238
| 0
| 0.059048
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
796503261bc0da4dbd38c017915d6293ed46fe21
| 12,936
|
py
|
Python
|
lithic/types/account_holder_create_params.py
|
lithic-com/lithic-python
|
be19d7195ebdf217b45f1ab59b39021d51330989
|
[
"Apache-2.0"
] | null | null | null |
lithic/types/account_holder_create_params.py
|
lithic-com/lithic-python
|
be19d7195ebdf217b45f1ab59b39021d51330989
|
[
"Apache-2.0"
] | null | null | null |
lithic/types/account_holder_create_params.py
|
lithic-com/lithic-python
|
be19d7195ebdf217b45f1ab59b39021d51330989
|
[
"Apache-2.0"
] | null | null | null |
# File generated from our OpenAPI spec by Stainless.
from __future__ import annotations
from typing import Optional, Union, List
from typing_extensions import Literal, TypedDict, Required
from ..types import shared_params
# Public API of this module: the TypedDict payload shapes accepted by the
# account-holder create endpoint, plus the top-level union type.
__all__ = [
    "KYCIndividualAddress",
    "KYCIndividual",
    "KYC",
    "KybBusinessEntityAddress",
    "KybBusinessEntity",
    "KybBeneficialOwnerEntitiesAddress",
    "KybBeneficialOwnerEntities",
    "KybBeneficialOwnerIndividualsAddress",
    "KybBeneficialOwnerIndividuals",
    "KybControlPersonAddress",
    "KybControlPerson",
    "Kyb",
    "AccountHolderCreateParams",
]
class KYCIndividualAddress(TypedDict, total=False):
    """Address of the individual undergoing KYC; only ``address2`` is optional."""

    address1: Required[str]
    """Valid deliverable address (no PO boxes)."""
    city: Required[str]
    """Name of city."""
    country: Required[str]
    """Valid country code. Only USA is currently supported, entered in uppercase ISO 3166-1 alpha-3 three-character format."""
    postal_code: Required[str]
    """Valid postal code. Only USA ZIP codes are currently supported, entered as a five-digit ZIP or nine-digit ZIP+4."""
    state: Required[str]
    """Valid state code. Only USA state codes are currently supported, entered in uppercase ISO 3166-2 two-character format."""
    address2: str
    """Unit or apartment number (if applicable)."""
class KYCIndividual(TypedDict, total=False):
    """Identity details of the individual a KYC account is opened for; all fields required."""

    address: Required[KYCIndividualAddress]
    """Individual's current address - PO boxes, UPS drops, and FedEx drops are not acceptable; APO/FPO are acceptable. Only USA addresses are currently supported."""
    dob: Required[str]
    """Individual's date of birth, as an ISO 8601 date."""
    email: Required[str]
    """Individual's email address. If utilizing Lithic for chargeback processing, this customer email address may be used to communicate dispute status and resolution."""
    first_name: Required[str]
    """Individual's first name, as it appears on government-issued identity documents."""
    government_id: Required[str]
    """Government-issued identification number (required for identity verification and compliance with banking regulations). Social Security Numbers (SSN) and Individual Taxpayer Identification Numbers (ITIN) are currently supported, entered as full nine-digits, with or without hyphens"""
    last_name: Required[str]
    """Individual's last name, as it appears on government-issued identity documents."""
    phone_number: Required[str]
    """Individual's phone number, entered in E.164 format."""
class KYC(TypedDict, total=False):
    """KYC (know-your-customer) request payload; ``kyc_passed_timestamp`` is the only optional field."""

    individual: Required[KYCIndividual]
    """Information on individual for whom the account is being opened and KYC is being run."""
    tos_timestamp: Required[str]
    """An ISO 8601 timestamp indicating when Lithic's terms of service were accepted by the API customer."""
    workflow: Required[Literal["KYC_ADVANCED", "KYC_BASIC", "KYC_BYO"]]
    """Specifies the type of KYC workflow to run."""
    kyc_passed_timestamp: str
    """An ISO 8601 timestamp indicating when precomputed KYC was completed on the individual with a pass result. This field is required only if workflow type is `KYC_BYO`."""
class KybBusinessEntityAddress(TypedDict, total=False):
    """Physical address of the KYB business entity; only ``address2`` is optional."""

    address1: Required[str]
    """Valid deliverable address (no PO boxes)."""
    city: Required[str]
    """Name of city."""
    country: Required[str]
    """Valid country code. Only USA is currently supported, entered in uppercase ISO 3166-1 alpha-3 three-character format."""
    postal_code: Required[str]
    """Valid postal code. Only USA ZIP codes are currently supported, entered as a five-digit ZIP or nine-digit ZIP+4."""
    state: Required[str]
    """Valid state code. Only USA state codes are currently supported, entered in uppercase ISO 3166-2 two-character format."""
    address2: str
    """Unit or apartment number (if applicable)."""
class KybBusinessEntity(TypedDict, total=False):
    """Details of the business the KYB account is opened for; DBA name and parent company are optional."""

    address: Required[KybBusinessEntityAddress]
    """Business's physical address - PO boxes, UPS drops, and FedEx drops are not acceptable; APO/FPO are acceptable."""
    government_id: Required[str]
    """Government-issued identification number. US Federal Employer Identification Numbers (EIN) are currently supported, entered as full nine-digits, with or without hyphens."""
    legal_business_name: Required[str]
    """Legal (formal) business name."""
    phone_numbers: Required[List[str]]
    """One or more of the business's phone number(s), entered as a list in E.164 format."""
    dba_business_name: str
    """Any name that the business operates under that is not its legal business name (if applicable)."""
    parent_company: str
    """Parent company name (if applicable)."""
class KybBeneficialOwnerEntitiesAddress(TypedDict, total=False):
    """Physical address of a beneficial-owner entity; only ``address2`` is optional."""

    address1: Required[str]
    """Valid deliverable address (no PO boxes)."""
    city: Required[str]
    """Name of city."""
    country: Required[str]
    """Valid country code. Only USA is currently supported, entered in uppercase ISO 3166-1 alpha-3 three-character format."""
    postal_code: Required[str]
    """Valid postal code. Only USA ZIP codes are currently supported, entered as a five-digit ZIP or nine-digit ZIP+4."""
    state: Required[str]
    """Valid state code. Only USA state codes are currently supported, entered in uppercase ISO 3166-2 two-character format."""
    address2: str
    """Unit or apartment number (if applicable)."""
class KybBeneficialOwnerEntities(TypedDict, total=False):
    """A business entity holding beneficial ownership; same shape as ``KybBusinessEntity``."""

    address: Required[KybBeneficialOwnerEntitiesAddress]
    """Business's physical address - PO boxes, UPS drops, and FedEx drops are not acceptable; APO/FPO are acceptable."""
    government_id: Required[str]
    """Government-issued identification number. US Federal Employer Identification Numbers (EIN) are currently supported, entered as full nine-digits, with or without hyphens."""
    legal_business_name: Required[str]
    """Legal (formal) business name."""
    phone_numbers: Required[List[str]]
    """One or more of the business's phone number(s), entered as a list in E.164 format."""
    dba_business_name: str
    """Any name that the business operates under that is not its legal business name (if applicable)."""
    parent_company: str
    """Parent company name (if applicable)."""
class KybBeneficialOwnerIndividualsAddress(TypedDict, total=False):
    """Address of an individual beneficial owner; only ``address2`` is optional."""

    address1: Required[str]
    """Valid deliverable address (no PO boxes)."""
    city: Required[str]
    """Name of city."""
    country: Required[str]
    """Valid country code. Only USA is currently supported, entered in uppercase ISO 3166-1 alpha-3 three-character format."""
    postal_code: Required[str]
    """Valid postal code. Only USA ZIP codes are currently supported, entered as a five-digit ZIP or nine-digit ZIP+4."""
    state: Required[str]
    """Valid state code. Only USA state codes are currently supported, entered in uppercase ISO 3166-2 two-character format."""
    address2: str
    """Unit or apartment number (if applicable)."""
class KybBeneficialOwnerIndividuals(TypedDict, total=False):
    """An individual holding beneficial ownership; same shape as ``KYCIndividual``."""

    address: Required[KybBeneficialOwnerIndividualsAddress]
    """Individual's current address - PO boxes, UPS drops, and FedEx drops are not acceptable; APO/FPO are acceptable. Only USA addresses are currently supported."""
    dob: Required[str]
    """Individual's date of birth, as an ISO 8601 date."""
    email: Required[str]
    """Individual's email address. If utilizing Lithic for chargeback processing, this customer email address may be used to communicate dispute status and resolution."""
    first_name: Required[str]
    """Individual's first name, as it appears on government-issued identity documents."""
    government_id: Required[str]
    """Government-issued identification number (required for identity verification and compliance with banking regulations). Social Security Numbers (SSN) and Individual Taxpayer Identification Numbers (ITIN) are currently supported, entered as full nine-digits, with or without hyphens"""
    last_name: Required[str]
    """Individual's last name, as it appears on government-issued identity documents."""
    phone_number: Required[str]
    """Individual's phone number, entered in E.164 format."""
class KybControlPersonAddress(TypedDict, total=False):
    """Address of the control person; only ``address2`` is optional."""

    address1: Required[str]
    """Valid deliverable address (no PO boxes)."""
    city: Required[str]
    """Name of city."""
    country: Required[str]
    """Valid country code. Only USA is currently supported, entered in uppercase ISO 3166-1 alpha-3 three-character format."""
    postal_code: Required[str]
    """Valid postal code. Only USA ZIP codes are currently supported, entered as a five-digit ZIP or nine-digit ZIP+4."""
    state: Required[str]
    """Valid state code. Only USA state codes are currently supported, entered in uppercase ISO 3166-2 two-character format."""
    address2: str
    """Unit or apartment number (if applicable)."""
class KybControlPerson(TypedDict, total=False):
    """Identity details of the business's control person; same shape as ``KYCIndividual``."""

    address: Required[KybControlPersonAddress]
    """Individual's current address - PO boxes, UPS drops, and FedEx drops are not acceptable; APO/FPO are acceptable. Only USA addresses are currently supported."""
    dob: Required[str]
    """Individual's date of birth, as an ISO 8601 date."""
    email: Required[str]
    """Individual's email address. If utilizing Lithic for chargeback processing, this customer email address may be used to communicate dispute status and resolution."""
    first_name: Required[str]
    """Individual's first name, as it appears on government-issued identity documents."""
    government_id: Required[str]
    """Government-issued identification number (required for identity verification and compliance with banking regulations). Social Security Numbers (SSN) and Individual Taxpayer Identification Numbers (ITIN) are currently supported, entered as full nine-digits, with or without hyphens"""
    last_name: Required[str]
    """Individual's last name, as it appears on government-issued identity documents."""
    phone_number: Required[str]
    """Individual's phone number, entered in E.164 format."""
class Kyb(TypedDict, total=False):
    """KYB (know-your-business) request payload; ``kyb_passed_timestamp`` is the only optional field."""

    beneficial_owner_entities: Required[List[KybBeneficialOwnerEntities]]
    """List of all entities with >25% ownership in the company. If no entity or individual owns >25% of the company, and the largest shareholder is an entity, please identify them in this field. See [FinCEN requirements](https://www.fincen.gov/sites/default/files/shared/CDD_Rev6.7_Sept_2017_Certificate.pdf) (Section I) for more background. If no business owner is an entity, pass in an empty list. However, either this parameter or `beneficial_owner_individuals` must be populated. on entities that should be included."""
    beneficial_owner_individuals: Required[List[KybBeneficialOwnerIndividuals]]
    """List of all individuals with >25% ownership in the company. If no entity or individual owns >25% of the company, and the largest shareholder is an individual, please identify them in this field. See [FinCEN requirements](https://www.fincen.gov/sites/default/files/shared/CDD_Rev6.7_Sept_2017_Certificate.pdf) (Section I) for more background on individuals that should be included. If no individual is an entity, pass in an empty list. However, either this parameter or `beneficial_owner_entities` must be populated."""
    business_entity: Required[KybBusinessEntity]
    """Information for business for which the account is being opened and KYB is being run."""
    control_person: Required[KybControlPerson]
    """An individual with significant responsibility for managing the legal entity (e.g., a Chief Executive Officer, Chief Financial Officer, Chief Operating Officer, Managing Member, General Partner, President, Vice President, or Treasurer). This can be an executive, or someone who will have program-wide access to the cards that Lithic will provide. In some cases, this individual could also be a beneficial owner listed above. See [FinCEN requirements](https://www.fincen.gov/sites/default/files/shared/CDD_Rev6.7_Sept_2017_Certificate.pdf) (Section II) for more background."""
    nature_of_business: Required[str]
    """Short description of the company's line of business (i.e., what does the company do?)."""
    tos_timestamp: Required[str]
    """An ISO 8601 timestamp indicating when Lithic's terms of service were accepted by the API customer."""
    website_url: Required[str]
    """Company website URL."""
    workflow: Required[Literal["KYB_BASIC", "KYB_BYO"]]
    """Specifies the type of KYB workflow to run."""
    kyb_passed_timestamp: str
    """An ISO 8601 timestamp indicating when precomputed KYC was completed on the business with a pass result. This field is required only if workflow type is `KYB_BYO`."""
# The create endpoint accepts either an individual (KYC) or a business (Kyb) payload.
AccountHolderCreateParams = Union[KYC, Kyb]
| 46.532374
| 581
| 0.7374
| 1,707
| 12,936
| 5.541886
| 0.164616
| 0.059302
| 0.033827
| 0.044397
| 0.781184
| 0.758774
| 0.753277
| 0.753277
| 0.753277
| 0.753277
| 0
| 0.012915
| 0.167981
| 12,936
| 277
| 582
| 46.700361
| 0.866022
| 0.003865
| 0
| 0.555556
| 1
| 0
| 0.074766
| 0.046969
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.018519
| 0.037037
| 0
| 0.851852
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
799279249bf2f662732bddceb61d3225979a7a1c
| 102
|
py
|
Python
|
src/behavior_tree_learning/core/plotter/print_functions.py
|
dgerod/behavior_tree_learning
|
71da80c91ecd48fd5da377f83604b62112ba9629
|
[
"Apache-2.0"
] | 7
|
2022-02-09T12:51:51.000Z
|
2022-03-19T14:40:16.000Z
|
src/behavior_tree_learning/core/plotter/print_functions.py
|
dgerod/bt_learning_using_gp
|
ac1fb6ba4dbd6d18b5d002c7ad2647771f8b0fb9
|
[
"Apache-2.0"
] | 6
|
2021-12-12T15:38:40.000Z
|
2022-01-31T11:02:12.000Z
|
src/behavior_tree_learning/core/plotter/print_functions.py
|
dgerod/bt_learning_using_gp
|
ac1fb6ba4dbd6d18b5d002c7ad2647771f8b0fb9
|
[
"Apache-2.0"
] | null | null | null |
import py_trees as pt
def print_ascii_tree(py_tree):
    """Print an ASCII rendering of the behavior tree rooted at ``py_tree.root``."""
    rendering = pt.display.ascii_tree(py_tree.root)
    print(rendering)
| 17
| 46
| 0.77451
| 19
| 102
| 3.842105
| 0.578947
| 0.246575
| 0.30137
| 0.410959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127451
| 102
| 5
| 47
| 20.4
| 0.820225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 9
|
79942b2d7989666bf2aaeeaa75032db470ecb82b
| 1,354
|
py
|
Python
|
scripts/Qubit/TimeDomain/swap_batch/batch.py
|
sourav-majumder/qtlab
|
96b2a127b1df7b45622c90229bd5ef8a4083614e
|
[
"MIT"
] | null | null | null |
scripts/Qubit/TimeDomain/swap_batch/batch.py
|
sourav-majumder/qtlab
|
96b2a127b1df7b45622c90229bd5ef8a4083614e
|
[
"MIT"
] | null | null | null |
scripts/Qubit/TimeDomain/swap_batch/batch.py
|
sourav-majumder/qtlab
|
96b2a127b1df7b45622c90229bd5ef8a4083614e
|
[
"MIT"
] | null | null | null |
# Batch runner: execute the swap measurement scripts for increasing high-pulse
# durations (in the same module namespace, as sequential execfile calls would).
# Durations 30, 100, 200, 300, 400, 500, 600, 800 and 1000 were present as
# commented-out entries in the original batch and remain disabled here.
_SWAP_SCRIPT_TEMPLATE = (
    r'C:\qtlab-aalto\scripts\Qubit\TimeDomain\swap_batch'
    r'\0dbm_swap\swap_with_high_pulse_%d.py'
)
for _pulse_duration in (2000, 3000, 4000, 5000):
    # Python 2 execfile: runs the script in the current global namespace,
    # exactly like the former one-call-per-duration lines.
    execfile(_SWAP_SCRIPT_TEMPLATE % _pulse_duration)
| 96.714286
| 104
| 0.838257
| 234
| 1,354
| 4.517094
| 0.119658
| 0.110691
| 0.12299
| 0.184484
| 0.957427
| 0.957427
| 0.957427
| 0.957427
| 0.957427
| 0.957427
| 0
| 0.042232
| 0.020679
| 1,354
| 13
| 105
| 104.153846
| 0.754902
| 0.672083
| 0
| 0
| 0
| 0
| 0.845606
| 0.845606
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
79951e1e449db66902a7aaacb3a90bafd0c07b7c
| 1,725
|
py
|
Python
|
tests/perf/test-assign-reg.py
|
wenq1/duktape
|
5ed3eee19b291f3b3de0b212cc62c0aba0ab4ecb
|
[
"MIT"
] | 4,268
|
2015-01-01T17:33:40.000Z
|
2022-03-31T17:53:31.000Z
|
tests/perf/test-assign-reg.py
|
KiraanRK/esp32-duktape
|
1b7fbcb8bd6bfc346d92df30ec099df7f13b03aa
|
[
"MIT"
] | 1,667
|
2015-01-01T22:43:03.000Z
|
2022-02-23T22:27:19.000Z
|
tests/perf/test-assign-reg.py
|
KiraanRK/esp32-duktape
|
1b7fbcb8bd6bfc346d92df30ec099df7f13b03aa
|
[
"MIT"
] | 565
|
2015-01-08T14:15:28.000Z
|
2022-03-31T16:29:31.000Z
|
# Micro-benchmark: repeated reads of function-local variables ("registers").
# The 100 assignments per iteration are DELIBERATELY unrolled — rewriting them
# as a loop would change what this perf test measures.
def test():
    # Ten distinct local float values to read from.
    r0 = 123.0
    r1 = 123.1
    r2 = 123.2
    r3 = 123.3
    r4 = 123.4
    r5 = 123.5
    r6 = 123.6
    r7 = 123.7
    r8 = 123.8
    r9 = 123.9
    i = 0
    # 1e7 iterations x 100 unrolled local-variable assignments.
    while i < 1e7:
        t = r0
        t = r1
        t = r2
        t = r3
        t = r4
        t = r5
        t = r6
        t = r7
        t = r8
        t = r9
        t = r0
        t = r1
        t = r2
        t = r3
        t = r4
        t = r5
        t = r6
        t = r7
        t = r8
        t = r9
        t = r0
        t = r1
        t = r2
        t = r3
        t = r4
        t = r5
        t = r6
        t = r7
        t = r8
        t = r9
        t = r0
        t = r1
        t = r2
        t = r3
        t = r4
        t = r5
        t = r6
        t = r7
        t = r8
        t = r9
        t = r0
        t = r1
        t = r2
        t = r3
        t = r4
        t = r5
        t = r6
        t = r7
        t = r8
        t = r9
        t = r0
        t = r1
        t = r2
        t = r3
        t = r4
        t = r5
        t = r6
        t = r7
        t = r8
        t = r9
        t = r0
        t = r1
        t = r2
        t = r3
        t = r4
        t = r5
        t = r6
        t = r7
        t = r8
        t = r9
        t = r0
        t = r1
        t = r2
        t = r3
        t = r4
        t = r5
        t = r6
        t = r7
        t = r8
        t = r9
        t = r0
        t = r1
        t = r2
        t = r3
        t = r4
        t = r5
        t = r6
        t = r7
        t = r8
        t = r9
        t = r0
        t = r1
        t = r2
        t = r3
        t = r4
        t = r5
        t = r6
        t = r7
        t = r8
        t = r9
        i += 1
test()
| 13.476563
| 18
| 0.222609
| 240
| 1,725
| 1.6
| 0.1125
| 0.078125
| 0.104167
| 0.15625
| 0.78125
| 0.78125
| 0.78125
| 0.78125
| 0.78125
| 0.78125
| 0
| 0.299611
| 0.702029
| 1,725
| 127
| 19
| 13.582677
| 0.447471
| 0
| 0
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008696
| false
| 0
| 0
| 0
| 0.008696
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
79ce249ed07f688f366151f9c1a12a13ac191dfe
| 95,909
|
py
|
Python
|
src/python/k4a/tests/test_functional_ctypes_azurekinect.py
|
seanyen/Azure-Kinect-Sensor-SDK
|
d87ef578676c05b9a5d23c097502942753bf3777
|
[
"MIT"
] | 1,120
|
2019-05-07T06:01:44.000Z
|
2022-03-28T08:02:29.000Z
|
src/python/k4a/tests/test_functional_ctypes_azurekinect.py
|
seanyen/Azure-Kinect-Sensor-SDK
|
d87ef578676c05b9a5d23c097502942753bf3777
|
[
"MIT"
] | 1,321
|
2019-05-07T14:37:15.000Z
|
2022-03-31T12:03:01.000Z
|
src/python/k4a/tests/test_functional_ctypes_azurekinect.py
|
seanyen/Azure-Kinect-Sensor-SDK
|
d87ef578676c05b9a5d23c097502942753bf3777
|
[
"MIT"
] | 529
|
2019-05-06T22:25:33.000Z
|
2022-03-31T13:57:26.000Z
|
'''
test_k4a_azurekinect.py
Tests for the k4a functions for Azure Kinect device.
Copyright (C) Microsoft Corporation. All rights reserved.
'''
import unittest
import ctypes
from time import sleep
import k4a
import test_config
def get_1080p_bgr32_nfov_2x2binned(device_handle):
    """Grab a capture in BGRA32 color at 1080p with NFOV 2x2-binned depth."""
    color_format = k4a.EImageFormat.COLOR_BGRA32
    color_resolution = k4a.EColorResolution.RES_1080P
    depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
    return test_config.get_capture(
        device_handle, color_format, color_resolution, depth_mode)
def k4a_device_get_color_control_capability(
    device_handle:k4a._bindings.k4a._DeviceHandle,
    color_control_command:k4a.EColorControlCommand
    )->k4a.EStatus:
    """Query the capability info for one color-control command.

    All capability outputs are written into throwaway ctypes objects;
    only the API status code is returned to the caller.
    """
    out_supports_auto = ctypes.c_bool(False)
    out_min = ctypes.c_int32(0)
    out_max = ctypes.c_int32(0)
    out_step = ctypes.c_int32(0)
    out_default = ctypes.c_int32(0)
    out_mode = ctypes.c_int32(k4a.EColorControlMode.AUTO.value)
    return k4a._bindings.k4a.k4a_device_get_color_control_capabilities(
        device_handle,
        color_control_command,
        ctypes.byref(out_supports_auto),
        ctypes.byref(out_min),
        ctypes.byref(out_max),
        ctypes.byref(out_step),
        ctypes.byref(out_default),
        ctypes.byref(out_mode),
    )
def k4a_device_set_and_get_color_control(
    device_handle:k4a._bindings.k4a._DeviceHandle,
    color_control_command:k4a.EColorControlCommand):
    """Exercise set/get of one color control and restore the original value.

    Sequence: query capabilities (for the step size), read the current value,
    write current+/-step, read it back, write the saved value back, read it
    back again. Returns a tuple of all six statuses plus the four values so
    the caller can assert on each stage.
    """
    mode = ctypes.c_int32(k4a.EColorControlMode.MANUAL.value)
    saved_value = ctypes.c_int32(0)
    # Get the step size.
    supports_auto = ctypes.c_bool(False)
    min_value = ctypes.c_int32(0)
    max_value = ctypes.c_int32(0)
    step_value = ctypes.c_int32(0)
    default_value = ctypes.c_int32(0)
    color_control_mode = ctypes.c_int32(k4a.EColorControlMode.MANUAL.value)
    status = k4a._bindings.k4a.k4a_device_get_color_control_capabilities(
        device_handle,
        color_control_command,
        ctypes.byref(supports_auto),
        ctypes.byref(min_value),
        ctypes.byref(max_value),
        ctypes.byref(step_value),
        ctypes.byref(default_value),
        ctypes.byref(color_control_mode),
    )
    # Use the device-reported mode for all subsequent get/set calls.
    mode = color_control_mode
    # Read the original value.
    status0 = k4a._bindings.k4a.k4a_device_get_color_control(
        device_handle,
        ctypes.c_int(color_control_command.value),
        ctypes.byref(mode),
        ctypes.byref(saved_value))
    # Write a new value: one step up if it stays in range, else one step down.
    new_value = ctypes.c_int32(0)
    if (saved_value.value + step_value.value <= max_value.value):
        new_value = ctypes.c_int32(saved_value.value + step_value.value)
    else:
        new_value = ctypes.c_int32(saved_value.value - step_value.value)
    status1 = k4a._bindings.k4a.k4a_device_set_color_control(
        device_handle,
        ctypes.c_int(color_control_command.value),
        mode,
        new_value)
    # Read back the value to check that it was written.
    new_value_readback = ctypes.c_int32(0)
    status2 = k4a._bindings.k4a.k4a_device_get_color_control(
        device_handle,
        ctypes.c_int(color_control_command.value),
        ctypes.byref(mode),
        ctypes.byref(new_value_readback))
    # Write the original saved value.
    status3 = k4a._bindings.k4a.k4a_device_set_color_control(
        device_handle,
        ctypes.c_int(color_control_command.value),
        mode,
        saved_value)
    # Read back the value to check that it was written.
    saved_value_readback = ctypes.c_int32(0)
    status4 = k4a._bindings.k4a.k4a_device_get_color_control(
        device_handle,
        ctypes.c_int(color_control_command.value),
        ctypes.byref(mode),
        ctypes.byref(saved_value_readback))
    return (status, status0, status1, status2, status3, status4,
        saved_value, saved_value_readback, new_value, new_value_readback)
class Test_Functional_Ctypes_AzureKinect(unittest.TestCase):
'''Test k4a functions requiring a device handle for Azure Kinect device.
'''
@classmethod
def setUpClass(cls):
    """Open device index 0 once for the whole test class and take the shared test lock."""
    cls.device_handle = k4a._bindings.k4a._DeviceHandle()
    status = k4a._bindings.k4a.k4a_device_open(ctypes.c_uint32(0), ctypes.byref(cls.device_handle))
    assert(k4a.K4A_SUCCEEDED(status))
    cls.lock = test_config.glb_lock
@classmethod
def tearDownClass(cls):
    """Release any leftover global capture, stop cameras/IMU, and close the device."""
    if test_config.glb_capture is not None:
        k4a._bindings.k4a.k4a_capture_release(test_config.glb_capture)
    test_config.glb_capture = None
    # Stop the cameras and imus before closing device.
    k4a._bindings.k4a.k4a_device_stop_cameras(cls.device_handle)
    k4a._bindings.k4a.k4a_device_stop_imu(cls.device_handle)
    k4a._bindings.k4a.k4a_device_close(cls.device_handle)
def test_functional_fast_ctypes_device_open_twice_expected_fail(self):
    """Opening an already-open index (0) or an out-of-range index must fail."""
    second_handle = k4a._bindings.k4a._DeviceHandle()
    for device_index in (0, 1000000):
        status = k4a._bindings.k4a.k4a_device_open(
            ctypes.c_uint32(device_index), ctypes.byref(second_handle))
        self.assertTrue(k4a.K4A_FAILED(status))
def test_functional_fast_ctypes_device_get_installed_count(self):
    """At least one device must be reported as attached."""
    attached = k4a._bindings.k4a.k4a_device_get_installed_count()
    self.assertGreater(attached, 0)
@unittest.skip
def test_functional_fast_ctypes_set_debug_message_handler_NULL_callback(self):
    """Installing a NULL debug-message callback should succeed (currently skipped)."""
    # NOTE(review): this calls k4a._bindings.k4a_set_debug_message_handler,
    # while other calls in this file go through k4a._bindings.k4a.<name> —
    # confirm the binding path before un-skipping.
    status = k4a._bindings.k4a_set_debug_message_handler(
        ctypes.cast(ctypes.c_void_p(), ctypes.POINTER(k4a.logging_message_cb)),
        ctypes.c_void_p(),
        k4a.ELogLevel.TRACE)
    self.assertTrue(k4a.K4A_SUCCEEDED(status))
@unittest.skip
def test_functional_fast_ctypes_set_debug_message_handler_callback(self):
    """Installing a real debug-message callback should succeed (currently skipped)."""
    # NOTE(review): glb_print_message is not defined in this file's visible
    # scope — presumably provided elsewhere; verify before un-skipping.
    logger_cb = k4a.logging_message_cb(glb_print_message)
    context = ctypes.c_void_p()
    status = k4a._bindings.k4a_set_debug_message_handler(
        ctypes.byref(logger_cb),
        context,
        k4a.ELogLevel.TRACE
    )
    self.assertTrue(k4a.K4A_SUCCEEDED(status))
def test_functional_fast_ctypes_device_get_capture(self):
    """A capture must be obtainable in the default 1080p BGRA32 NFOV-binned mode."""
    with self.lock:
        grabbed = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
        self.assertIsNotNone(grabbed)
def test_functional_fast_ctypes_device_get_imu_sample(self):
    """Start cameras + IMU, read one IMU sample, stop, and check its fields."""
    with self.lock:
        device_config = k4a.DEVICE_CONFIG_BGRA32_1080P_NFOV_2X2BINNED_FPS15
        # The cameras must be started before the IMU can be started.
        status = k4a._bindings.k4a.k4a_device_start_cameras(self.device_handle, ctypes.byref(device_config))
        self.assertTrue(k4a.K4A_SUCCEEDED(status))
        status = k4a._bindings.k4a.k4a_device_start_imu(self.device_handle)
        self.assertTrue(k4a.K4A_SUCCEEDED(status))
        imu_sample = k4a.ImuSample()
        timeout_ms = ctypes.c_int32(1000)
        status = k4a._bindings.k4a.k4a_device_get_imu_sample(
            self.device_handle,
            ctypes.byref(imu_sample),
            timeout_ms
        )
        # Stop imu and cameras.
        k4a._bindings.k4a.k4a_device_stop_imu(self.device_handle)
        k4a._bindings.k4a.k4a_device_stop_cameras(self.device_handle)
        self.assertEqual(status, k4a.EWaitStatus.SUCCEEDED)
        # Temperature and timestamps must be populated; the per-axis
        # accelerometer/gyro checks are deliberately disabled below.
        self.assertNotAlmostEqual(imu_sample.temperature, 0.0)
        #self.assertNotAlmostEqual(imu_sample.acc_sample.xyz.x, 0.0)
        #self.assertNotAlmostEqual(imu_sample.acc_sample.xyz.y, 0.0)
        #self.assertNotAlmostEqual(imu_sample.acc_sample.xyz.z, 0.0)
        self.assertNotEqual(imu_sample.acc_timestamp_usec, 0)
        #self.assertNotAlmostEqual(imu_sample.gyro_sample.xyz.x, 0.0)
        #self.assertNotAlmostEqual(imu_sample.gyro_sample.xyz.y, 0.0)
        #self.assertNotAlmostEqual(imu_sample.gyro_sample.xyz.z, 0.0)
        self.assertNotEqual(imu_sample.gyro_timestamp_usec, 0.0)
    def test_functional_fast_ctypes_capture_create(self):
        """Create a capture, bump its refcount, and release it back to zero.

        k4a_capture_create() returns a handle with refcount 1; the explicit
        k4a_capture_reference() raises it to 2, so the two release calls are
        deliberate — they bring the refcount back to 0 and free the capture.
        """
        capture = k4a._bindings.k4a._CaptureHandle()
        status = k4a._bindings.k4a.k4a_capture_create(ctypes.byref(capture))
        self.assertTrue(k4a.K4A_SUCCEEDED(status))
        k4a._bindings.k4a.k4a_capture_reference(capture)  # refcount 1 -> 2
        k4a._bindings.k4a.k4a_capture_release(capture)    # refcount 2 -> 1
        k4a._bindings.k4a.k4a_capture_release(capture)    # refcount 1 -> 0, freed
def test_functional_fast_ctypes_capture_get_color_image(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
self.assertIsInstance(color_image, k4a._bindings.k4a._ImageHandle)
k4a._bindings.k4a.k4a_image_release(color_image)
def test_functional_fast_ctypes_capture_get_depth_image(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
depth_image = k4a._bindings.k4a.k4a_capture_get_depth_image(capture)
self.assertIsInstance(depth_image, k4a._bindings.k4a._ImageHandle)
k4a._bindings.k4a.k4a_image_release(depth_image)
def test_functional_fast_ctypes_capture_get_ir_image(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
ir_image = k4a._bindings.k4a.k4a_capture_get_ir_image(capture)
self.assertIsInstance(ir_image, k4a._bindings.k4a._ImageHandle)
k4a._bindings.k4a.k4a_image_release(ir_image)
def test_functional_fast_ctypes_image_create(self):
image_format = k4a.EImageFormat.COLOR_BGRA32
width_pixels = 512
height_pixels = 512
stride_pixels = 4*512
image_handle = k4a._bindings.k4a._ImageHandle()
status = k4a._bindings.k4a.k4a_image_create(ctypes.c_int(image_format.value),
width_pixels, height_pixels, stride_pixels, ctypes.byref(image_handle))
self.assertEqual(k4a.EStatus.SUCCEEDED, status)
# Check that the created image has properties requested.
created_image_format = k4a._bindings.k4a.k4a_image_get_format(image_handle)
created_image_width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(image_handle)
created_image_height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(image_handle)
created_image_stride_bytes = k4a._bindings.k4a.k4a_image_get_stride_bytes(image_handle)
k4a._bindings.k4a.k4a_image_release(image_handle)
self.assertEqual(image_format, created_image_format)
self.assertEqual(width_pixels, created_image_width_pixels)
self.assertEqual(height_pixels, created_image_height_pixels)
self.assertEqual(stride_pixels, created_image_stride_bytes)
    def test_functional_fast_ctypes_capture_set_color_image(self):
        """Swap a freshly-created color image into a capture and restore the original.

        Sequence: save the capture's color image, set a new 512x512 BGRA32 image,
        verify the capture now reports the new image's properties, then put the
        saved image back and verify the original 1080p properties reappear.
        The release calls are ordered carefully: each get returns a referenced
        handle, and set adds the capture's own reference, so every local handle
        is released exactly once.
        """
        with self.lock:
            capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
            self.assertIsNotNone(capture)
            # Grab the current color image.
            saved_color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
            # Create a new image.
            image_format = k4a.EImageFormat.COLOR_BGRA32
            width_pixels = ctypes.c_int(512)
            height_pixels = ctypes.c_int(512)
            stride_bytes = ctypes.c_int(4*512)
            image_handle = k4a._bindings.k4a._ImageHandle()
            status = k4a._bindings.k4a.k4a_image_create(ctypes.c_int(image_format.value),
                width_pixels, height_pixels, stride_bytes, ctypes.byref(image_handle))
            self.assertEqual(k4a.EStatus.SUCCEEDED, status)
            # Check that the created image has the expected properties.
            image_format = k4a._bindings.k4a.k4a_image_get_format(image_handle)
            image_width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(image_handle)
            image_height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(image_handle)
            image_stride_bytes = k4a._bindings.k4a.k4a_image_get_stride_bytes(image_handle)
            self.assertEqual(image_format, k4a.EImageFormat.COLOR_BGRA32)
            self.assertEqual(image_width_pixels, 512)
            self.assertEqual(image_height_pixels, 512)
            self.assertEqual(image_stride_bytes, 512*4)
            # Replace the saved image with the created one; the capture holds
            # its own reference, so our local handle can be released.
            k4a._bindings.k4a.k4a_capture_set_color_image(capture, image_handle)
            k4a._bindings.k4a.k4a_image_release(image_handle)
            # Get a new image. It should be identical to the created one.
            color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
            # Test that the new image has characteristics of the created image.
            color_image_format = k4a._bindings.k4a.k4a_image_get_format(color_image)
            color_image_width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(color_image)
            color_image_height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(color_image)
            color_image_stride_bytes = k4a._bindings.k4a.k4a_image_get_stride_bytes(color_image)
            k4a._bindings.k4a.k4a_image_release(color_image)
            # Now put back the saved color image into the capture.
            k4a._bindings.k4a.k4a_capture_set_color_image(capture, saved_color_image)
            k4a._bindings.k4a.k4a_image_release(saved_color_image)
            # Test that the image has characteristics of the saved color image.
            saved_color_image2 = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
            saved_color_image_format = k4a._bindings.k4a.k4a_image_get_format(saved_color_image2)
            saved_color_image_width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(saved_color_image2)
            saved_color_image_height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(saved_color_image2)
            saved_color_image_stride_bytes = k4a._bindings.k4a.k4a_image_get_stride_bytes(saved_color_image2)
            k4a._bindings.k4a.k4a_image_release(saved_color_image2)
            # New image: the 512x512 one we created.
            self.assertEqual(color_image_format, k4a.EImageFormat.COLOR_BGRA32)
            self.assertEqual(color_image_width_pixels, 512)
            self.assertEqual(color_image_height_pixels, 512)
            self.assertEqual(color_image_stride_bytes, 512*4)
            # Restored image: the original 1080p capture image.
            self.assertEqual(saved_color_image_format, k4a.EImageFormat.COLOR_BGRA32)
            self.assertEqual(saved_color_image_width_pixels, 1920)
            self.assertEqual(saved_color_image_height_pixels, 1080)
            self.assertEqual(saved_color_image_stride_bytes, 1920*4)
def test_functional_fast_ctypes_capture_set_depth_image(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
# Grab the current depth image and add a reference to it so it is not destroyed.
saved_depth_image = k4a._bindings.k4a.k4a_capture_get_depth_image(capture)
# Create a new image.
image_format = k4a.EImageFormat.DEPTH16
width_pixels = ctypes.c_int(512)
height_pixels = ctypes.c_int(512)
stride_bytes = ctypes.c_int(4*512)
image_handle = k4a._bindings.k4a._ImageHandle()
status = k4a._bindings.k4a.k4a_image_create(ctypes.c_int(image_format.value),
width_pixels, height_pixels, stride_bytes, ctypes.byref(image_handle))
self.assertEqual(k4a.EStatus.SUCCEEDED, status)
# Replace the saved image with the created one.
k4a._bindings.k4a.k4a_capture_set_depth_image(capture, image_handle)
k4a._bindings.k4a.k4a_image_release(image_handle)
# Get a new image. It should be identical to the created one.
depth_image = k4a._bindings.k4a.k4a_capture_get_depth_image(capture)
# Test that the new image has characteristics of the created image.
depth_image_format = k4a._bindings.k4a.k4a_image_get_format(depth_image)
depth_image_width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(depth_image)
depth_image_height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(depth_image)
depth_image_stride_bytes = k4a._bindings.k4a.k4a_image_get_stride_bytes(depth_image)
k4a._bindings.k4a.k4a_image_release(depth_image)
# Now put back the saved color image into the capture.
k4a._bindings.k4a.k4a_capture_set_depth_image(capture, saved_depth_image)
k4a._bindings.k4a.k4a_image_release(saved_depth_image)
# Test that the image has characteristics of the saved depth image.
saved_depth_image2 = k4a._bindings.k4a.k4a_capture_get_depth_image(capture)
saved_depth_image_format = k4a._bindings.k4a.k4a_image_get_format(saved_depth_image2)
saved_depth_image_width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(saved_depth_image)
saved_depth_image_height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(saved_depth_image)
saved_depth_image_stride_bytes = k4a._bindings.k4a.k4a_image_get_stride_bytes(saved_depth_image)
k4a._bindings.k4a.k4a_image_release(saved_depth_image2)
self.assertEqual(depth_image_format, k4a.EImageFormat.DEPTH16)
self.assertEqual(depth_image_width_pixels, 512)
self.assertEqual(depth_image_height_pixels, 512)
self.assertEqual(depth_image_stride_bytes, 512*4)
self.assertEqual(saved_depth_image_format, k4a.EImageFormat.DEPTH16)
self.assertEqual(saved_depth_image_width_pixels, 320)
self.assertEqual(saved_depth_image_height_pixels, 288)
self.assertEqual(saved_depth_image_stride_bytes, 320*2)
    def test_functional_fast_ctypes_capture_set_ir_image(self):
        """Swap a freshly-created IR image into a capture and restore the original.

        Mirrors the color/depth variants: save the capture's IR image, set a new
        512x512 IR16 image, verify the capture reports the new image's
        properties, restore the saved image, and verify the original NFOV
        2x2-binned properties (320x288, 2-byte pixels) reappear.
        """
        with self.lock:
            capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
            self.assertIsNotNone(capture)
            # Grab the current IR image; the returned handle holds a reference.
            saved_ir_image = k4a._bindings.k4a.k4a_capture_get_ir_image(capture)
            # Create a new image.
            image_format = k4a.EImageFormat.IR16
            width_pixels = ctypes.c_int(512)
            height_pixels = ctypes.c_int(512)
            stride_bytes = ctypes.c_int(4*512)
            image_handle = k4a._bindings.k4a._ImageHandle()
            status = k4a._bindings.k4a.k4a_image_create(ctypes.c_int(image_format.value),
                width_pixels, height_pixels, stride_bytes, ctypes.byref(image_handle))
            self.assertEqual(k4a.EStatus.SUCCEEDED, status)
            # Replace the saved image with the created one; the capture holds
            # its own reference, so our local handle can be released.
            k4a._bindings.k4a.k4a_capture_set_ir_image(capture, image_handle)
            k4a._bindings.k4a.k4a_image_release(image_handle)
            # Get a new image. It should be identical to the created one.
            ir_image = k4a._bindings.k4a.k4a_capture_get_ir_image(capture)
            # Test that the new image has characteristics of the created image.
            ir_image_format = k4a._bindings.k4a.k4a_image_get_format(ir_image)
            ir_image_width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(ir_image)
            ir_image_height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(ir_image)
            ir_image_stride_bytes = k4a._bindings.k4a.k4a_image_get_stride_bytes(ir_image)
            k4a._bindings.k4a.k4a_image_release(ir_image)
            # Now put back the saved IR image into the capture.
            k4a._bindings.k4a.k4a_capture_set_ir_image(capture, saved_ir_image)
            k4a._bindings.k4a.k4a_image_release(saved_ir_image)
            # Test that the image has characteristics of the saved IR image.
            saved_ir_image2 = k4a._bindings.k4a.k4a_capture_get_ir_image(capture)
            saved_ir_image_format = k4a._bindings.k4a.k4a_image_get_format(saved_ir_image2)
            saved_ir_image_width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(saved_ir_image2)
            saved_ir_image_height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(saved_ir_image2)
            saved_ir_image_stride_bytes = k4a._bindings.k4a.k4a_image_get_stride_bytes(saved_ir_image2)
            k4a._bindings.k4a.k4a_image_release(saved_ir_image2)
            # New image: the 512x512 one we created.
            self.assertEqual(ir_image_format, k4a.EImageFormat.IR16)
            self.assertEqual(ir_image_width_pixels, 512)
            self.assertEqual(ir_image_height_pixels, 512)
            self.assertEqual(ir_image_stride_bytes, 512*4)
            # Restored image: the original NFOV 2x2-binned IR image.
            self.assertEqual(saved_ir_image_format, k4a.EImageFormat.IR16)
            self.assertEqual(saved_ir_image_width_pixels, 320)
            self.assertEqual(saved_ir_image_height_pixels, 288)
            self.assertEqual(saved_ir_image_stride_bytes, 320*2)
def test_functional_fast_ctypes_capture_get_temperature_c(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
temperature_c = k4a._bindings.k4a.k4a_capture_get_temperature_c(capture)
self.assertNotAlmostEqual(temperature_c, 0.0, 2)
def test_functional_fast_ctypes_capture_set_temperature_c(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
absolute_zero_temperature_c = -277.15
k4a._bindings.k4a.k4a_capture_set_temperature_c(capture, absolute_zero_temperature_c)
temperature_c = k4a._bindings.k4a.k4a_capture_get_temperature_c(capture)
self.assertAlmostEqual(temperature_c, absolute_zero_temperature_c, 2)
def test_functional_fast_ctypes_image_get_buffer(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
buffer_ptr = k4a._bindings.k4a.k4a_image_get_buffer(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertIsNotNone(ctypes.cast(buffer_ptr, ctypes.c_void_p).value)
def test_functional_fast_ctypes_image_get_buffer_None(self):
buffer_ptr = k4a._bindings.k4a.k4a_image_get_buffer(None)
self.assertIsNone(ctypes.cast(buffer_ptr, ctypes.c_void_p).value)
def test_functional_fast_ctypes_image_get_size(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
color_image_size_bytes = k4a._bindings.k4a.k4a_image_get_size(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertEqual(color_image_size_bytes, 1080*1920*4)
def test_functional_fast_ctypes_image_get_format(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
color_image_format = k4a._bindings.k4a.k4a_image_get_format(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertEqual(color_image_format, k4a.EImageFormat.COLOR_BGRA32)
def test_functional_fast_ctypes_image_get_width_pixels(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
color_image_width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertEqual(color_image_width_pixels, 1920)
def test_functional_fast_ctypes_image_get_height_pixels(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
color_image_height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertEqual(color_image_height_pixels, 1080)
def test_functional_fast_ctypes_image_get_stride_bytes(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
color_image_stride_bytes = k4a._bindings.k4a.k4a_image_get_stride_bytes(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertEqual(color_image_stride_bytes, 1920*4)
def test_functional_fast_ctypes_image_get_device_timestamp_usec(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
device_timestamp_usec = k4a._bindings.k4a.k4a_image_get_device_timestamp_usec(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertIsInstance(device_timestamp_usec, int)
self.assertNotEqual(device_timestamp_usec, 0) # Strictly not always the case.
def test_functional_fast_ctypes_image_get_system_timestamp_nsec(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
system_timestamp_nsec = k4a._bindings.k4a.k4a_image_get_system_timestamp_nsec(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertIsInstance(system_timestamp_nsec, int)
self.assertNotEqual(system_timestamp_nsec, 0) # Strictly not always the case.
def test_functional_fast_ctypes_image_get_exposure_usec(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
exposure_usec = k4a._bindings.k4a.k4a_image_get_exposure_usec(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertIsInstance(exposure_usec, int)
self.assertNotEqual(exposure_usec, 0) # Strictly not always the case.
def test_functional_fast_ctypes_image_get_white_balance(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
white_balance = k4a._bindings.k4a.k4a_image_get_white_balance(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertIsInstance(white_balance, int)
self.assertNotEqual(white_balance, 0) # Strictly not always the case.
def test_functional_fast_ctypes_image_get_iso_speed(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
iso_speed = k4a._bindings.k4a.k4a_image_get_iso_speed(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertIsInstance(iso_speed, int)
self.assertNotEqual(iso_speed, 0) # Strictly not always the case.
def test_functional_fast_ctypes_image_set_device_timestamp_usec(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
# Save the original value.
saved_value = k4a._bindings.k4a.k4a_image_get_device_timestamp_usec(color_image)
# Set a new value and read it back.
new_value = saved_value + 1
k4a._bindings.k4a.k4a_image_set_device_timestamp_usec(color_image, new_value)
new_value_readback = k4a._bindings.k4a.k4a_image_get_device_timestamp_usec(color_image)
# Set the original value on the device and read it back.
k4a._bindings.k4a.k4a_image_set_device_timestamp_usec(color_image, saved_value)
saved_value_readback = k4a._bindings.k4a.k4a_image_get_device_timestamp_usec(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertEqual(new_value_readback, new_value)
self.assertEqual(saved_value_readback, saved_value)
self.assertNotEqual(new_value, saved_value)
self.assertNotEqual(saved_value_readback, new_value_readback)
def test_functional_fast_ctypes_image_set_system_timestamp_nsec(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
# Save the original value.
saved_value = k4a._bindings.k4a.k4a_image_get_system_timestamp_nsec(color_image)
# Set a new value and read it back.
new_value = saved_value + 1
k4a._bindings.k4a.k4a_image_set_system_timestamp_nsec(color_image, new_value)
new_value_readback = k4a._bindings.k4a.k4a_image_get_system_timestamp_nsec(color_image)
# Set the original value on the device and read it back.
k4a._bindings.k4a.k4a_image_set_system_timestamp_nsec(color_image, saved_value)
saved_value_readback = k4a._bindings.k4a.k4a_image_get_system_timestamp_nsec(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertEqual(new_value_readback, new_value)
self.assertEqual(saved_value_readback, saved_value)
self.assertNotEqual(new_value, saved_value)
self.assertNotEqual(saved_value_readback, new_value_readback)
def test_functional_fast_ctypes_image_set_exposure_usec(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
# Save the original value.
saved_value = k4a._bindings.k4a.k4a_image_get_exposure_usec(color_image)
# Set a new value and read it back.
new_value = saved_value + 1
k4a._bindings.k4a.k4a_image_set_exposure_usec(color_image, new_value)
new_value_readback = k4a._bindings.k4a.k4a_image_get_exposure_usec(color_image)
# Set the original value on the device and read it back.
k4a._bindings.k4a.k4a_image_set_exposure_usec(color_image, saved_value)
saved_value_readback = k4a._bindings.k4a.k4a_image_get_exposure_usec(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertEqual(new_value_readback, new_value)
self.assertEqual(saved_value_readback, saved_value)
self.assertNotEqual(new_value, saved_value)
self.assertNotEqual(saved_value_readback, new_value_readback)
def test_functional_fast_ctypes_image_set_white_balance(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
# Save the original value.
saved_value = k4a._bindings.k4a.k4a_image_get_white_balance(color_image)
# Set a new value and read it back.
new_value = saved_value + 1
k4a._bindings.k4a.k4a_image_set_white_balance(color_image, new_value)
new_value_readback = k4a._bindings.k4a.k4a_image_get_white_balance(color_image)
# Set the original value on the device and read it back.
k4a._bindings.k4a.k4a_image_set_white_balance(color_image, saved_value)
saved_value_readback = k4a._bindings.k4a.k4a_image_get_white_balance(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertEqual(new_value_readback, new_value)
self.assertEqual(saved_value_readback, saved_value)
self.assertNotEqual(new_value, saved_value)
self.assertNotEqual(saved_value_readback, new_value_readback)
def test_functional_fast_ctypes_image_set_iso_speed(self):
with self.lock:
capture = get_1080p_bgr32_nfov_2x2binned(self.device_handle)
self.assertIsNotNone(capture)
color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
# Save the original value.
saved_value = k4a._bindings.k4a.k4a_image_get_iso_speed(color_image)
# Set a new value and read it back.
new_value = saved_value + 1
k4a._bindings.k4a.k4a_image_set_iso_speed(color_image, new_value)
new_value_readback = k4a._bindings.k4a.k4a_image_get_iso_speed(color_image)
# Set the original value on the device and read it back.
k4a._bindings.k4a.k4a_image_set_iso_speed(color_image, saved_value)
saved_value_readback = k4a._bindings.k4a.k4a_image_get_iso_speed(color_image)
k4a._bindings.k4a.k4a_image_release(color_image)
self.assertEqual(new_value_readback, new_value)
self.assertEqual(saved_value_readback, saved_value)
self.assertNotEqual(new_value, saved_value)
self.assertNotEqual(saved_value_readback, new_value_readback)
def test_functional_fast_ctypes_device_start_cameras_stop_cameras(self):
with self.lock:
# Start the cameras.
device_config = k4a.DeviceConfiguration()
device_config.color_format = k4a.EImageFormat.COLOR_BGRA32
device_config.color_resolution = k4a.EColorResolution.RES_1080P
device_config.depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
device_config.camera_fps = k4a.EFramesPerSecond.FPS_15
device_config.synchronized_images_only = True
device_config.depth_delay_off_color_usec = 0
device_config.wired_sync_mode = k4a.EWiredSyncMode.STANDALONE
device_config.subordinate_delay_off_master_usec = 0
device_config.disable_streaming_indicator = False
status = k4a._bindings.k4a.k4a_device_start_cameras(self.device_handle, ctypes.byref(device_config))
self.assertTrue(k4a.K4A_SUCCEEDED(status))
k4a._bindings.k4a.k4a_device_stop_cameras(self.device_handle)
    def test_functional_fast_ctypes_device_start_cameras_stop_cameras_DEFAULT_DISABLE(self):
        """Starting cameras with the all-disabled config is expected to fail.

        This intentionally asserts K4A_FAILED: the SDK rejects a configuration
        in which both color and depth streams are disabled. stop_cameras is
        still called so the device is left in a known state either way.
        """
        with self.lock:
            device_config = k4a.DEVICE_CONFIG_DISABLE_ALL
            status = k4a._bindings.k4a.k4a_device_start_cameras(self.device_handle, ctypes.byref(device_config))
            self.assertTrue(k4a.K4A_FAILED(status)) # Seems to fail when DISABLE_ALL config is used.
            k4a._bindings.k4a.k4a_device_stop_cameras(self.device_handle)
def test_functional_fast_ctypes_device_start_imu_stop_imu(self):
with self.lock:
device_config = k4a.DEVICE_CONFIG_BGRA32_1080P_NFOV_2X2BINNED_FPS15
status = k4a._bindings.k4a.k4a_device_start_cameras(self.device_handle, ctypes.byref(device_config))
self.assertTrue(k4a.K4A_SUCCEEDED(status))
status = k4a._bindings.k4a.k4a_device_start_imu(self.device_handle)
self.assertTrue(k4a.K4A_SUCCEEDED(status))
k4a._bindings.k4a.k4a_device_stop_imu(self.device_handle)
k4a._bindings.k4a.k4a_device_stop_cameras(self.device_handle)
def test_functional_fast_ctypes_device_get_serialnum(self):
strsize = ctypes.c_size_t(32)
serial_number = (ctypes.c_char * strsize.value)()
status = k4a._bindings.k4a.k4a_device_get_serialnum(self.device_handle, serial_number, ctypes.byref(strsize))
self.assertEqual(k4a.EStatus.SUCCEEDED, status)
def test_functional_fast_ctypes_device_get_version(self):
hwver = k4a.HardwareVersion()
status = k4a._bindings.k4a.k4a_device_get_version(self.device_handle, ctypes.byref(hwver))
self.assertEqual(k4a.EStatus.SUCCEEDED, status)
# Check the versions.
self.assertTrue(hwver.rgb.major != 0 or hwver.rgb.minor != 0 or hwver.rgb.iteration != 0)
self.assertTrue(hwver.depth.major != 0 or hwver.depth.minor != 0 or hwver.depth.iteration != 0)
self.assertTrue(hwver.audio.major != 0 or hwver.audio.minor != 0 or hwver.audio.iteration != 0)
self.assertTrue(hwver.depth_sensor.major != 0 or hwver.depth_sensor.minor != 0 or hwver.depth_sensor.iteration != 0)
def test_functional_fast_ctypes_device_get_color_control_capabilities(self):
color_control_commands = [
k4a.EColorControlCommand.BACKLIGHT_COMPENSATION,
k4a.EColorControlCommand.BRIGHTNESS,
k4a.EColorControlCommand.CONTRAST,
k4a.EColorControlCommand.EXPOSURE_TIME_ABSOLUTE,
k4a.EColorControlCommand.GAIN,
k4a.EColorControlCommand.POWERLINE_FREQUENCY,
k4a.EColorControlCommand.SATURATION,
k4a.EColorControlCommand.SHARPNESS,
k4a.EColorControlCommand.WHITEBALANCE
]
for command in color_control_commands:
with self.subTest(command = command):
status = k4a_device_get_color_control_capability(self.device_handle, command)
self.assertTrue(k4a.K4A_SUCCEEDED(status))
    def test_functional_fast_ctypes_device_get_color_control(self):
        """Reading the current mode/value succeeds for every color control command."""
        color_control_commands = [
            k4a.EColorControlCommand.BACKLIGHT_COMPENSATION,
            k4a.EColorControlCommand.BRIGHTNESS,
            k4a.EColorControlCommand.CONTRAST,
            k4a.EColorControlCommand.EXPOSURE_TIME_ABSOLUTE,
            k4a.EColorControlCommand.GAIN,
            k4a.EColorControlCommand.POWERLINE_FREQUENCY,
            k4a.EColorControlCommand.SATURATION,
            k4a.EColorControlCommand.SHARPNESS,
            k4a.EColorControlCommand.WHITEBALANCE
        ]
        for command in color_control_commands:
            with self.subTest(command = command):
                # Out-parameters overwritten by the SDK; initial values are
                # placeholders only.
                mode = ctypes.c_int32(k4a.EColorControlMode.AUTO.value)
                value = ctypes.c_int32(0)
                status = k4a._bindings.k4a.k4a_device_get_color_control(
                    self.device_handle,
                    ctypes.c_int(command.value),
                    ctypes.byref(mode),
                    ctypes.byref(value)
                )
                self.assertTrue(k4a.K4A_SUCCEEDED(status))
    # For some reason, manually setting EXPOSURE_TIME_ABSOLUTE fails.
    def test_functional_fast_ctypes_device_set_color_control(self):
        """Set-then-restore round-trip for each writable color control command.

        EXPOSURE_TIME_ABSOLUTE and WHITEBALANCE are commented out of the list
        because setting them manually fails on the device (see note above).
        The helper returns the statuses of every get/set step plus the saved,
        new, and readback values, all asserted below.
        """
        color_control_commands = [
            k4a.EColorControlCommand.BACKLIGHT_COMPENSATION,
            k4a.EColorControlCommand.BRIGHTNESS,
            k4a.EColorControlCommand.CONTRAST,
            #k4a.EColorControlCommand.EXPOSURE_TIME_ABSOLUTE,
            k4a.EColorControlCommand.GAIN,
            k4a.EColorControlCommand.POWERLINE_FREQUENCY,
            k4a.EColorControlCommand.SATURATION,
            k4a.EColorControlCommand.SHARPNESS,
            #k4a.EColorControlCommand.WHITEBALANCE
        ]
        for command in color_control_commands:
            with self.subTest(command = command):
                (status, status0, status1, status2, status3, status4,
                    saved_value, saved_value_readback, new_value, new_value_readback) = \
                    k4a_device_set_and_get_color_control(self.device_handle, command)
                self.assertTrue(k4a.K4A_SUCCEEDED(status))
                self.assertTrue(k4a.K4A_SUCCEEDED(status0))
                self.assertTrue(k4a.K4A_SUCCEEDED(status1))
                self.assertTrue(k4a.K4A_SUCCEEDED(status2))
                self.assertTrue(k4a.K4A_SUCCEEDED(status3))
                self.assertTrue(k4a.K4A_SUCCEEDED(status4))
                # Both writes observable; restored value differs from the new one.
                self.assertEqual(saved_value.value, saved_value_readback.value)
                self.assertEqual(new_value.value, new_value_readback.value)
                self.assertNotEqual(saved_value.value, new_value.value)
    def test_functional_fast_ctypes_device_get_raw_calibration(self):
        """Two-call protocol for raw calibration: size query, then real read.

        The first call deliberately passes a 1-byte buffer so the SDK returns
        BUFFER_TOO_SMALL and writes the required size into buffer_size; the
        second call passes a correctly-sized buffer and must succeed.
        """
        with self.lock:
            # Get buffer size requirement.
            buffer_size = ctypes.c_size_t(0)
            buffer = ctypes.c_uint8(0)
            status = k4a._bindings.k4a.k4a_device_get_raw_calibration(
                self.device_handle, ctypes.byref(buffer), ctypes.byref(buffer_size))
            self.assertEqual(status, k4a.EBufferStatus.BUFFER_TOO_SMALL)
            # Allocate the reported size and retry.
            buffer = ctypes.create_string_buffer(buffer_size.value)
            buffer = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_uint8))
            status = k4a._bindings.k4a.k4a_device_get_raw_calibration(
                self.device_handle, buffer, ctypes.byref(buffer_size))
            self.assertEqual(status, k4a.EBufferStatus.SUCCEEDED)
    def test_functional_fast_ctypes_device_get_calibration(self):
        """Calibration retrieval succeeds for every depth-mode/resolution pair.

        The same _Calibration struct is reused across iterations; each call
        overwrites it in full, so no per-iteration reset is needed.
        """
        with self.lock:
            depth_modes = [
                k4a.EDepthMode.NFOV_2X2BINNED,
                k4a.EDepthMode.NFOV_UNBINNED,
                k4a.EDepthMode.WFOV_2X2BINNED,
                k4a.EDepthMode.WFOV_UNBINNED,
                k4a.EDepthMode.PASSIVE_IR,
            ]
            color_resolutions = [
                k4a.EColorResolution.RES_3072P,
                k4a.EColorResolution.RES_2160P,
                k4a.EColorResolution.RES_1536P,
                k4a.EColorResolution.RES_1440P,
                k4a.EColorResolution.RES_1080P,
                k4a.EColorResolution.RES_720P,
            ]
            calibration = k4a._bindings.k4a._Calibration()
            for depth_mode in depth_modes:
                for color_resolution in color_resolutions:
                    with self.subTest(depth_mode = depth_mode, color_resolution = color_resolution):
                        status = k4a._bindings.k4a.k4a_device_get_calibration(
                            self.device_handle,
                            depth_mode,
                            color_resolution,
                            ctypes.byref(calibration))
                        self.assertTrue(k4a.K4A_SUCCEEDED(status))
def test_functional_fast_ctypes_device_get_sync_jack(self):
sync_in = ctypes.c_bool(False)
sync_out = ctypes.c_bool(False)
status = k4a._bindings.k4a.k4a_device_get_sync_jack(
self.device_handle, ctypes.byref(sync_in), ctypes.byref(sync_out))
self.assertTrue(k4a.K4A_SUCCEEDED(status))
    def test_functional_fast_ctypes_calibration_get_from_raw(self):
        """Parse calibrations from a raw calibration blob for every mode pair.

        First fetches the raw calibration via the two-call size-query protocol
        (see test_functional_fast_ctypes_device_get_raw_calibration), then runs
        k4a_calibration_get_from_raw() on the blob for each depth-mode /
        color-resolution combination.
        """
        with self.lock:
            # Get buffer size requirement.
            buffer_size = ctypes.c_size_t(0)
            buffer = ctypes.c_uint8(0)
            status = k4a._bindings.k4a.k4a_device_get_raw_calibration(
                self.device_handle, ctypes.byref(buffer), ctypes.byref(buffer_size))
            self.assertEqual(status, k4a.EBufferStatus.BUFFER_TOO_SMALL)
            # Allocate the reported size and fetch the blob for real.
            buffer = ctypes.create_string_buffer(buffer_size.value)
            buffer = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_uint8))
            status = k4a._bindings.k4a.k4a_device_get_raw_calibration(
                self.device_handle, buffer, ctypes.byref(buffer_size))
            self.assertEqual(status, k4a.EBufferStatus.SUCCEEDED)
            # Now get the calibration from the buffer.
            depth_modes = [
                k4a.EDepthMode.NFOV_2X2BINNED,
                k4a.EDepthMode.NFOV_UNBINNED,
                k4a.EDepthMode.WFOV_2X2BINNED,
                k4a.EDepthMode.WFOV_UNBINNED,
                k4a.EDepthMode.PASSIVE_IR,
            ]
            color_resolutions = [
                k4a.EColorResolution.RES_3072P,
                k4a.EColorResolution.RES_2160P,
                k4a.EColorResolution.RES_1536P,
                k4a.EColorResolution.RES_1440P,
                k4a.EColorResolution.RES_1080P,
                k4a.EColorResolution.RES_720P,
            ]
            # k4a_calibration_get_from_raw takes a char* rather than uint8*.
            buffer = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_char))
            calibration = k4a._bindings.k4a._Calibration()
            for depth_mode in depth_modes:
                for color_resolution in color_resolutions:
                    with self.subTest(depth_mode = depth_mode, color_resolution = color_resolution):
                        status = k4a._bindings.k4a.k4a_calibration_get_from_raw(
                            buffer,
                            buffer_size,
                            depth_mode,
                            color_resolution,
                            ctypes.byref(calibration))
                        self.assertTrue(k4a.K4A_SUCCEEDED(status))
    def test_functional_fast_ctypes_calibration_3d_to_3d(self):
        """Transform a 3D point from the color camera frame to the depth frame.

        Only the transform's success status is asserted for distinct cameras;
        the identity check at the bottom is a guard that only runs when source
        and target cameras are the same (not the case with the values above).
        """
        with self.lock:
            depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
            color_resolution = k4a.EColorResolution.RES_720P
            source_camera = k4a.ECalibrationType.COLOR
            target_camera = k4a.ECalibrationType.DEPTH
            calibration = k4a._bindings.k4a._Calibration()
            # Arbitrary point (mm) in front of the color camera.
            source_point = k4a._bindings.k4a._Float3(300, 300, 500)
            target_point = k4a._bindings.k4a._Float3()
            status = k4a._bindings.k4a.k4a_device_get_calibration(
                self.device_handle,
                depth_mode,
                color_resolution,
                ctypes.byref(calibration))
            self.assertTrue(k4a.K4A_SUCCEEDED(status))
            # Transform source point from source_camera to target_camera.
            status = k4a._bindings.k4a.k4a_calibration_3d_to_3d(
                ctypes.byref(calibration),
                ctypes.byref(source_point),
                source_camera,
                target_camera,
                ctypes.byref(target_point))
            self.assertTrue(k4a.K4A_SUCCEEDED(status))
            # Same-camera transforms must be the identity.
            if source_camera == target_camera:
                self.assertAlmostEqual(source_point.xyz.x, target_point.xyz.x)
                self.assertAlmostEqual(source_point.xyz.y, target_point.xyz.y)
                self.assertAlmostEqual(source_point.xyz.z, target_point.xyz.z)
def test_functional_fast_ctypes_calibration_2d_to_3d(self):
    """Unproject a 2D color pixel to a 3D depth-camera point via raw ctypes.

    Uses one fixed mode combination; the exhaustive sweep lives in
    test_functional_ctypes_calibration_2d_to_3d.
    """
    with self.lock:
        dll = k4a._bindings.k4a
        depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
        color_resolution = k4a.EColorResolution.RES_720P
        source_camera = k4a.ECalibrationType.COLOR
        target_camera = k4a.ECalibrationType.DEPTH
        depth_mm = 500.0

        calibration = dll._Calibration()
        source_point = dll._Float2(300, 300)
        target_point = dll._Float3()
        valid_int_flag = ctypes.c_int(0)

        status = dll.k4a_device_get_calibration(
            self.device_handle, depth_mode, color_resolution,
            ctypes.byref(calibration))
        self.assertTrue(k4a.K4A_SUCCEEDED(status))

        # Transform source point from source_camera to target_camera.
        status = dll.k4a_calibration_2d_to_3d(
            ctypes.byref(calibration),
            ctypes.byref(source_point),
            ctypes.c_float(depth_mm),
            ctypes.c_int(source_camera),
            ctypes.c_int(target_camera),
            ctypes.byref(target_point),
            ctypes.byref(valid_int_flag))
        self.assertTrue(k4a.K4A_SUCCEEDED(status))
        self.assertEqual(1, valid_int_flag.value)
def test_functional_fast_ctypes_calibration_3d_to_2d(self):
    """Project a 3D color-camera point onto the depth image plane via raw ctypes.

    Uses one fixed mode combination; the exhaustive sweep lives in
    test_functional_ctypes_calibration_3d_to_2d.
    """
    with self.lock:
        dll = k4a._bindings.k4a
        depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
        color_resolution = k4a.EColorResolution.RES_720P
        source_camera = k4a.ECalibrationType.COLOR
        target_camera = k4a.ECalibrationType.DEPTH

        calibration = dll._Calibration()
        source_point = dll._Float3(300, 300, 500)
        target_point = dll._Float2()
        valid_int_flag = ctypes.c_int(0)

        status = dll.k4a_device_get_calibration(
            self.device_handle, depth_mode, color_resolution,
            ctypes.byref(calibration))
        self.assertTrue(k4a.K4A_SUCCEEDED(status))

        # Transform source point from source_camera to target_camera.
        status = dll.k4a_calibration_3d_to_2d(
            ctypes.byref(calibration),
            ctypes.byref(source_point),
            source_camera,
            target_camera,
            ctypes.byref(target_point),
            ctypes.byref(valid_int_flag))
        self.assertTrue(k4a.K4A_SUCCEEDED(status))
        self.assertEqual(1, valid_int_flag.value)
def test_functional_fast_ctypes_calibration_2d_to_2d(self):
    """Map a 2D pixel from the color camera to the depth camera via raw ctypes.

    Uses one fixed mode combination; the exhaustive sweep lives in
    test_functional_ctypes_calibration_2d_to_2d.
    """
    with self.lock:
        dll = k4a._bindings.k4a
        depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
        color_resolution = k4a.EColorResolution.RES_720P
        source_camera = k4a.ECalibrationType.COLOR
        target_camera = k4a.ECalibrationType.DEPTH
        depth_mm = 500

        calibration = dll._Calibration()
        source_point = dll._Float2(300, 300)
        target_point = dll._Float2()
        valid_int_flag = ctypes.c_int(0)

        status = dll.k4a_device_get_calibration(
            self.device_handle, depth_mode, color_resolution,
            ctypes.byref(calibration))
        self.assertTrue(k4a.K4A_SUCCEEDED(status))

        # Transform source point from source_camera to target_camera.
        status = dll.k4a_calibration_2d_to_2d(
            ctypes.byref(calibration),
            ctypes.byref(source_point),
            depth_mm,
            source_camera,
            target_camera,
            ctypes.byref(target_point),
            ctypes.byref(valid_int_flag))
        self.assertTrue(k4a.K4A_SUCCEEDED(status))
        self.assertEqual(1, valid_int_flag.value)

        # An identity transform (same camera) must round-trip the pixel.
        if source_camera == target_camera:
            for axis in ('x', 'y'):
                self.assertAlmostEqual(
                    getattr(source_point.xy, axis),
                    getattr(target_point.xy, axis))
def test_functional_fast_ctypes_calibration_color_2d_to_depth_2d(self):
    """Map a color-image pixel to depth-image coordinates using a live depth image.

    Uses one fixed mode combination; the exhaustive (and scene-dependent)
    sweep lives in test_functional_ctypes_calibration_color_2d_to_depth_2d.
    """
    with self.lock:
        dll = k4a._bindings.k4a
        depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
        color_resolution = k4a.EColorResolution.RES_720P

        calibration = dll._Calibration()
        target_point = dll._Float2()
        valid_int_flag = ctypes.c_int(0)

        status = dll.k4a_device_get_calibration(
            self.device_handle, depth_mode, color_resolution,
            ctypes.byref(calibration))
        self.assertTrue(k4a.K4A_SUCCEEDED(status))

        # Grab a capture so a real depth image is available for the lookup.
        # NOTE(review): the capture handle itself is not released here —
        # presumably test_config or test teardown owns it; confirm.
        capture = test_config.get_capture(self.device_handle,
                                          k4a.EImageFormat.COLOR_BGRA32,
                                          color_resolution,
                                          depth_mode)
        depth_image = dll.k4a_capture_get_depth_image(capture)
        self.assertIsNotNone(depth_image)

        # Pick a source pixel at a quarter of the color image extent.
        color_image = dll.k4a_capture_get_color_image(capture)
        width_pixels = dll.k4a_image_get_width_pixels(color_image)
        height_pixels = dll.k4a_image_get_height_pixels(color_image)
        source_point = dll._Float2(width_pixels / 4, height_pixels / 4)

        # Transform source point from the color camera to the depth camera.
        status = dll.k4a_calibration_color_2d_to_depth_2d(
            ctypes.byref(calibration),
            ctypes.byref(source_point),
            depth_image,
            ctypes.byref(target_point),
            ctypes.byref(valid_int_flag))
        dll.k4a_image_release(depth_image)
        dll.k4a_image_release(color_image)
        self.assertTrue(k4a.K4A_SUCCEEDED(status))
        self.assertEqual(1, valid_int_flag.value)
def test_functional_ctypes_calibration_3d_to_3d(self):
    """Sweep every depth mode / color resolution / camera pair through
    k4a_calibration_3d_to_3d and check identity transforms round-trip."""
    with self.lock:
        dll = k4a._bindings.k4a
        depth_modes = [
            k4a.EDepthMode.NFOV_2X2BINNED,
            k4a.EDepthMode.NFOV_UNBINNED,
            k4a.EDepthMode.WFOV_2X2BINNED,
            k4a.EDepthMode.WFOV_UNBINNED,
            k4a.EDepthMode.PASSIVE_IR,
        ]
        color_resolutions = [
            k4a.EColorResolution.RES_3072P,
            k4a.EColorResolution.RES_2160P,
            k4a.EColorResolution.RES_1536P,
            k4a.EColorResolution.RES_1440P,
            k4a.EColorResolution.RES_1080P,
            k4a.EColorResolution.RES_720P,
        ]
        cameras = [k4a.ECalibrationType.COLOR, k4a.ECalibrationType.DEPTH]

        calibration = dll._Calibration()
        source_point = dll._Float3(300, 300, 500)
        target_point = dll._Float3()

        mode_pairs = [(d, c) for d in depth_modes for c in color_resolutions]
        for depth_mode, color_resolution in mode_pairs:
            for source_camera in cameras:
                for target_camera in cameras:
                    with self.subTest(depth_mode=depth_mode,
                                      color_resolution=color_resolution,
                                      source_camera=source_camera,
                                      target_camera=target_camera):
                        status = dll.k4a_device_get_calibration(
                            self.device_handle, depth_mode, color_resolution,
                            ctypes.byref(calibration))
                        self.assertTrue(k4a.K4A_SUCCEEDED(status))
                        # Transform source point from source_camera to target_camera.
                        status = dll.k4a_calibration_3d_to_3d(
                            ctypes.byref(calibration),
                            ctypes.byref(source_point),
                            source_camera,
                            target_camera,
                            ctypes.byref(target_point))
                        self.assertTrue(k4a.K4A_SUCCEEDED(status))
                        # Same camera -> the point must come back unchanged.
                        if source_camera == target_camera:
                            for axis in ('x', 'y', 'z'):
                                self.assertAlmostEqual(
                                    getattr(source_point.xyz, axis),
                                    getattr(target_point.xyz, axis))
def test_functional_ctypes_calibration_2d_to_3d(self):
    """Sweep every depth mode / color resolution / camera pair through
    k4a_calibration_2d_to_3d and require a valid unprojection."""
    with self.lock:
        dll = k4a._bindings.k4a
        depth_modes = [
            k4a.EDepthMode.NFOV_2X2BINNED,
            k4a.EDepthMode.NFOV_UNBINNED,
            k4a.EDepthMode.WFOV_2X2BINNED,
            k4a.EDepthMode.WFOV_UNBINNED,
            k4a.EDepthMode.PASSIVE_IR,
        ]
        color_resolutions = [
            k4a.EColorResolution.RES_3072P,
            k4a.EColorResolution.RES_2160P,
            k4a.EColorResolution.RES_1536P,
            k4a.EColorResolution.RES_1440P,
            k4a.EColorResolution.RES_1080P,
            k4a.EColorResolution.RES_720P,
        ]
        cameras = [k4a.ECalibrationType.COLOR, k4a.ECalibrationType.DEPTH]

        calibration = dll._Calibration()
        source_point = dll._Float2(300, 300)
        depth_mm = 500.0
        target_point = dll._Float3()
        valid_int_flag = ctypes.c_int(0)

        mode_pairs = [(d, c) for d in depth_modes for c in color_resolutions]
        for depth_mode, color_resolution in mode_pairs:
            for source_camera in cameras:
                for target_camera in cameras:
                    with self.subTest(depth_mode=depth_mode,
                                      color_resolution=color_resolution,
                                      source_camera=source_camera,
                                      target_camera=target_camera):
                        status = dll.k4a_device_get_calibration(
                            self.device_handle, depth_mode, color_resolution,
                            ctypes.byref(calibration))
                        self.assertTrue(k4a.K4A_SUCCEEDED(status))
                        # Unproject the 2D pixel at the given depth.
                        status = dll.k4a_calibration_2d_to_3d(
                            ctypes.byref(calibration),
                            ctypes.byref(source_point),
                            ctypes.c_float(depth_mm),
                            ctypes.c_int(source_camera),
                            ctypes.c_int(target_camera),
                            ctypes.byref(target_point),
                            ctypes.byref(valid_int_flag))
                        self.assertTrue(k4a.K4A_SUCCEEDED(status))
                        self.assertEqual(1, valid_int_flag.value)
def test_functional_ctypes_calibration_3d_to_2d(self):
    """Sweep every depth mode / color resolution / camera pair through
    k4a_calibration_3d_to_2d and require a valid projection."""
    with self.lock:
        dll = k4a._bindings.k4a
        depth_modes = [
            k4a.EDepthMode.NFOV_2X2BINNED,
            k4a.EDepthMode.NFOV_UNBINNED,
            k4a.EDepthMode.WFOV_2X2BINNED,
            k4a.EDepthMode.WFOV_UNBINNED,
            k4a.EDepthMode.PASSIVE_IR,
        ]
        color_resolutions = [
            k4a.EColorResolution.RES_3072P,
            k4a.EColorResolution.RES_2160P,
            k4a.EColorResolution.RES_1536P,
            k4a.EColorResolution.RES_1440P,
            k4a.EColorResolution.RES_1080P,
            k4a.EColorResolution.RES_720P,
        ]
        cameras = [k4a.ECalibrationType.COLOR, k4a.ECalibrationType.DEPTH]

        calibration = dll._Calibration()
        source_point = dll._Float3(300, 300, 500)
        target_point = dll._Float2()
        valid_int_flag = ctypes.c_int(0)

        mode_pairs = [(d, c) for d in depth_modes for c in color_resolutions]
        for depth_mode, color_resolution in mode_pairs:
            for source_camera in cameras:
                for target_camera in cameras:
                    with self.subTest(depth_mode=depth_mode,
                                      color_resolution=color_resolution,
                                      source_camera=source_camera,
                                      target_camera=target_camera):
                        status = dll.k4a_device_get_calibration(
                            self.device_handle, depth_mode, color_resolution,
                            ctypes.byref(calibration))
                        self.assertTrue(k4a.K4A_SUCCEEDED(status))
                        # Project the 3D point onto the target camera's image plane.
                        status = dll.k4a_calibration_3d_to_2d(
                            ctypes.byref(calibration),
                            ctypes.byref(source_point),
                            source_camera,
                            target_camera,
                            ctypes.byref(target_point),
                            ctypes.byref(valid_int_flag))
                        self.assertTrue(k4a.K4A_SUCCEEDED(status))
                        self.assertEqual(1, valid_int_flag.value)
def test_functional_ctypes_calibration_2d_to_2d(self):
    """Sweep every depth mode / color resolution / camera pair through
    k4a_calibration_2d_to_2d; identity transforms must round-trip."""
    with self.lock:
        dll = k4a._bindings.k4a
        depth_modes = [
            k4a.EDepthMode.NFOV_2X2BINNED,
            k4a.EDepthMode.NFOV_UNBINNED,
            k4a.EDepthMode.WFOV_2X2BINNED,
            k4a.EDepthMode.WFOV_UNBINNED,
            k4a.EDepthMode.PASSIVE_IR,
        ]
        color_resolutions = [
            k4a.EColorResolution.RES_3072P,
            k4a.EColorResolution.RES_2160P,
            k4a.EColorResolution.RES_1536P,
            k4a.EColorResolution.RES_1440P,
            k4a.EColorResolution.RES_1080P,
            k4a.EColorResolution.RES_720P,
        ]
        cameras = [k4a.ECalibrationType.COLOR, k4a.ECalibrationType.DEPTH]

        calibration = dll._Calibration()
        source_point = dll._Float2(300, 300)
        depth_mm = 500
        target_point = dll._Float2()
        valid_int_flag = ctypes.c_int(0)

        mode_pairs = [(d, c) for d in depth_modes for c in color_resolutions]
        for depth_mode, color_resolution in mode_pairs:
            for source_camera in cameras:
                for target_camera in cameras:
                    with self.subTest(depth_mode=depth_mode,
                                      color_resolution=color_resolution,
                                      source_camera=source_camera,
                                      target_camera=target_camera):
                        status = dll.k4a_device_get_calibration(
                            self.device_handle, depth_mode, color_resolution,
                            ctypes.byref(calibration))
                        self.assertTrue(k4a.K4A_SUCCEEDED(status))
                        # Transform source point from source_camera to target_camera.
                        status = dll.k4a_calibration_2d_to_2d(
                            ctypes.byref(calibration),
                            ctypes.byref(source_point),
                            depth_mm,
                            source_camera,
                            target_camera,
                            ctypes.byref(target_point),
                            ctypes.byref(valid_int_flag))
                        self.assertTrue(k4a.K4A_SUCCEEDED(status))
                        self.assertEqual(1, valid_int_flag.value)
                        # Same camera -> the pixel must come back unchanged.
                        if source_camera == target_camera:
                            for axis in ('x', 'y'):
                                self.assertAlmostEqual(
                                    getattr(source_point.xy, axis),
                                    getattr(target_point.xy, axis))
# This test is data dependent. It may fail based on scene content.
# It is favorable to point the camera at a flat wall about 30 cm away.
# Perhaps it's better to generate synthetic data.
def test_functional_ctypes_calibration_color_2d_to_depth_2d(self):
    """Sweep depth mode / color resolution pairs through
    k4a_calibration_color_2d_to_depth_2d using live depth images."""
    with self.lock:
        dll = k4a._bindings.k4a
        depth_modes = [
            k4a.EDepthMode.NFOV_2X2BINNED,
            k4a.EDepthMode.NFOV_UNBINNED,
            k4a.EDepthMode.WFOV_2X2BINNED,
            k4a.EDepthMode.WFOV_UNBINNED,
        ]
        color_resolutions = [
            k4a.EColorResolution.RES_3072P,
            k4a.EColorResolution.RES_2160P,
            k4a.EColorResolution.RES_1536P,
            k4a.EColorResolution.RES_1440P,
            k4a.EColorResolution.RES_1080P,
            k4a.EColorResolution.RES_720P,
        ]

        calibration = dll._Calibration()
        target_point = dll._Float2()
        valid_int_flag = ctypes.c_int(0)

        for depth_mode in depth_modes:
            for color_resolution in color_resolutions:
                with self.subTest(depth_mode=depth_mode,
                                  color_resolution=color_resolution):
                    status = dll.k4a_device_get_calibration(
                        self.device_handle, depth_mode, color_resolution,
                        ctypes.byref(calibration))
                    self.assertTrue(k4a.K4A_SUCCEEDED(status))
                    # Grab a capture with a real depth image for the lookup.
                    # NOTE(review): the capture handle is not released per
                    # iteration — presumably test_config or teardown owns it;
                    # confirm.
                    capture = test_config.get_capture(self.device_handle,
                                                      k4a.EImageFormat.COLOR_BGRA32,
                                                      color_resolution,
                                                      depth_mode)
                    depth_image = dll.k4a_capture_get_depth_image(capture)
                    self.assertIsNotNone(depth_image)
                    # Pick a source pixel at a quarter of the color image extent.
                    color_image = dll.k4a_capture_get_color_image(capture)
                    width_pixels = dll.k4a_image_get_width_pixels(color_image)
                    height_pixels = dll.k4a_image_get_height_pixels(color_image)
                    source_point = dll._Float2(width_pixels / 4, height_pixels / 4)
                    # Transform the pixel from the color to the depth camera.
                    status = dll.k4a_calibration_color_2d_to_depth_2d(
                        ctypes.byref(calibration),
                        ctypes.byref(source_point),
                        depth_image,
                        ctypes.byref(target_point),
                        ctypes.byref(valid_int_flag))
                    dll.k4a_image_release(depth_image)
                    dll.k4a_image_release(color_image)
                    self.assertTrue(k4a.K4A_SUCCEEDED(status))
                    self.assertEqual(1, valid_int_flag.value)
def test_functional_fast_ctypes_transformation_create_destroy(self):
    """Create and destroy a transformation handle for every mode combination."""
    with self.lock:
        dll = k4a._bindings.k4a
        depth_modes = [
            k4a.EDepthMode.NFOV_2X2BINNED,
            k4a.EDepthMode.NFOV_UNBINNED,
            k4a.EDepthMode.WFOV_2X2BINNED,
            k4a.EDepthMode.WFOV_UNBINNED,
            k4a.EDepthMode.PASSIVE_IR,
        ]
        color_resolutions = [
            k4a.EColorResolution.RES_3072P,
            k4a.EColorResolution.RES_2160P,
            k4a.EColorResolution.RES_1536P,
            k4a.EColorResolution.RES_1440P,
            k4a.EColorResolution.RES_1080P,
            k4a.EColorResolution.RES_720P,
        ]
        calibration = dll._Calibration()

        for depth_mode in depth_modes:
            for color_resolution in color_resolutions:
                with self.subTest(depth_mode=depth_mode,
                                  color_resolution=color_resolution):
                    status = dll.k4a_device_get_calibration(
                        self.device_handle, depth_mode, color_resolution,
                        ctypes.byref(calibration))
                    self.assertTrue(k4a.K4A_SUCCEEDED(status))
                    transformation = dll.k4a_transformation_create(
                        ctypes.byref(calibration))
                    self.assertIsNotNone(transformation)  # Might not be a valid assert.
                    dll.k4a_transformation_destroy(transformation)
def test_functional_fast_ctypes_transformation_depth_image_to_color_camera(self):
    """Transform a depth image into the color camera's geometry via raw ctypes.

    Uses one fixed mode combination; the exhaustive sweep lives in
    test_functional_ctypes_transformation_depth_image_to_color_camera.
    """
    with self.lock:
        depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
        color_resolution = k4a.EColorResolution.RES_720P
        calibration = k4a._bindings.k4a._Calibration()
        status = k4a._bindings.k4a.k4a_device_get_calibration(
            self.device_handle,
            depth_mode,
            color_resolution,
            ctypes.byref(calibration))
        self.assertTrue(k4a.K4A_SUCCEEDED(status))
        transformation = k4a._bindings.k4a.k4a_transformation_create(ctypes.byref(calibration))
        self.assertIsNotNone(transformation)  # Might not be a valid assert.
        # Get a depth image.
        # NOTE(review): the capture handle is not released here — presumably
        # test_config or test teardown owns it; confirm.
        capture = test_config.get_capture(self.device_handle,
            k4a.EImageFormat.COLOR_BGRA32,
            color_resolution,
            depth_mode)
        depth_image = k4a._bindings.k4a.k4a_capture_get_depth_image(capture)
        image_format = k4a._bindings.k4a.k4a_image_get_format(depth_image)
        # The output image takes the color camera's dimensions; stride is
        # width * 2 because depth pixels are 16-bit.
        color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
        width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(color_image)
        height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(color_image)
        stride_bytes = width_pixels * 2
        # Create an output depth image.
        transformed_image = k4a._bindings.k4a._ImageHandle()
        status = k4a._bindings.k4a.k4a_image_create(
            image_format,
            width_pixels,
            height_pixels,
            stride_bytes,
            ctypes.byref(transformed_image)
        )
        self.assertTrue(k4a.K4A_SUCCEEDED(status))
        # Apply the transformation.
        status = k4a._bindings.k4a.k4a_transformation_depth_image_to_color_camera(
            transformation,
            depth_image,
            transformed_image
        )
        self.assertTrue(k4a.K4A_SUCCEEDED(status))
        k4a._bindings.k4a.k4a_transformation_destroy(transformation)
        k4a._bindings.k4a.k4a_image_release(transformed_image)
        k4a._bindings.k4a.k4a_image_release(depth_image)
        # Bug fix: color_image was acquired above but never released (handle
        # leak); sibling tests in this file release it.
        k4a._bindings.k4a.k4a_image_release(color_image)
def test_functional_fast_ctypes_transformation_depth_image_to_color_camera_custom(self):
    """Transform a depth image plus a CUSTOM16 image into color-camera geometry.

    Exercises k4a_transformation_depth_image_to_color_camera_custom with one
    fixed mode combination; the exhaustive sweep lives in
    test_functional_ctypes_transformation_depth_image_to_color_camera_custom.
    """
    with self.lock:
        depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
        color_resolution = k4a.EColorResolution.RES_720P
        calibration = k4a._bindings.k4a._Calibration()
        status = k4a._bindings.k4a.k4a_device_get_calibration(
            self.device_handle,
            depth_mode,
            color_resolution,
            ctypes.byref(calibration))
        self.assertTrue(k4a.K4A_SUCCEEDED(status))
        transformation = k4a._bindings.k4a.k4a_transformation_create(ctypes.byref(calibration))
        self.assertIsNotNone(transformation)  # Might not be a valid assert.
        # Get a capture.
        # NOTE(review): the capture handle is not released here — presumably
        # test_config or test teardown owns it; confirm.
        capture = test_config.get_capture(self.device_handle,
            k4a.EImageFormat.COLOR_BGRA32,
            color_resolution,
            depth_mode)
        # Output images take the color camera's dimensions (16-bit pixels).
        color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
        output_width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(color_image)
        output_height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(color_image)
        output_stride_bytes = output_width_pixels * 2
        # Input images take the depth camera's dimensions.
        depth_image = k4a._bindings.k4a.k4a_capture_get_depth_image(capture)
        image_format = k4a._bindings.k4a.k4a_image_get_format(depth_image)
        input_width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(depth_image)
        input_height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(depth_image)
        # Create an output depth image.
        transformed_depth_image = k4a._bindings.k4a._ImageHandle()
        status = k4a._bindings.k4a.k4a_image_create(
            image_format,
            output_width_pixels,
            output_height_pixels,
            output_stride_bytes,
            ctypes.byref(transformed_depth_image)
        )
        self.assertTrue(k4a.K4A_SUCCEEDED(status))
        # Create a custom input image (depth-camera sized, CUSTOM16 format).
        image_format = k4a.EImageFormat.CUSTOM16
        custom_image = k4a._bindings.k4a._ImageHandle()
        status = k4a._bindings.k4a.k4a_image_create(
            image_format.value,
            input_width_pixels,
            input_height_pixels,
            input_width_pixels * 2,
            ctypes.byref(custom_image))
        self.assertEqual(k4a.EStatus.SUCCEEDED, status)
        # Create a transformed custom output image (color-camera sized).
        image_format = k4a.EImageFormat.CUSTOM16
        transformed_custom_image = k4a._bindings.k4a._ImageHandle()
        status = k4a._bindings.k4a.k4a_image_create(
            image_format.value,
            output_width_pixels,
            output_height_pixels,
            output_width_pixels * 2,
            ctypes.byref(transformed_custom_image))
        self.assertEqual(k4a.EStatus.SUCCEEDED, status)
        # Apply the transformation.
        status = k4a._bindings.k4a.k4a_transformation_depth_image_to_color_camera_custom(
            transformation,
            depth_image,
            custom_image,
            transformed_depth_image,
            transformed_custom_image,
            k4a.ETransformInterpolationType.LINEAR,
            0
        )
        self.assertTrue(k4a.K4A_SUCCEEDED(status))
        k4a._bindings.k4a.k4a_transformation_destroy(transformation)
        k4a._bindings.k4a.k4a_image_release(depth_image)
        # Bug fix: color_image was acquired above but never released (handle
        # leak); sibling tests in this file release it.
        k4a._bindings.k4a.k4a_image_release(color_image)
        k4a._bindings.k4a.k4a_image_release(custom_image)
        k4a._bindings.k4a.k4a_image_release(transformed_depth_image)
        k4a._bindings.k4a.k4a_image_release(transformed_custom_image)
def test_functional_fast_ctypes_transformation_color_image_to_depth_camera(self):
    """Transform a color image into the depth camera's geometry via raw ctypes.

    Uses one fixed mode combination; the exhaustive sweep lives in
    test_functional_ctypes_transformation_color_image_to_depth_camera.
    """
    with self.lock:
        dll = k4a._bindings.k4a
        depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
        color_resolution = k4a.EColorResolution.RES_720P

        calibration = dll._Calibration()
        status = dll.k4a_device_get_calibration(
            self.device_handle, depth_mode, color_resolution,
            ctypes.byref(calibration))
        self.assertTrue(k4a.K4A_SUCCEEDED(status))
        transformation = dll.k4a_transformation_create(ctypes.byref(calibration))
        self.assertIsNotNone(transformation)  # Might not be a valid assert.

        # Grab a capture with both a depth and a color image.
        capture = test_config.get_capture(self.device_handle,
                                          k4a.EImageFormat.COLOR_BGRA32,
                                          color_resolution,
                                          depth_mode)
        depth_image = dll.k4a_capture_get_depth_image(capture)
        color_image = dll.k4a_capture_get_color_image(capture)

        # Output image: depth-camera dimensions, color format; stride is
        # width * 4 for 32-bit BGRA pixels.
        image_format = dll.k4a_image_get_format(color_image)
        width_pixels = dll.k4a_image_get_width_pixels(depth_image)
        height_pixels = dll.k4a_image_get_height_pixels(depth_image)
        stride_bytes = width_pixels * 4
        transformed_image = dll._ImageHandle()
        status = dll.k4a_image_create(
            image_format,
            width_pixels,
            height_pixels,
            stride_bytes,
            ctypes.byref(transformed_image)
        )
        self.assertTrue(k4a.K4A_SUCCEEDED(status))

        # Apply the transformation.
        status = dll.k4a_transformation_color_image_to_depth_camera(
            transformation,
            depth_image,
            color_image,
            transformed_image
        )
        self.assertTrue(k4a.K4A_SUCCEEDED(status))

        dll.k4a_transformation_destroy(transformation)
        dll.k4a_image_release(transformed_image)
        dll.k4a_image_release(depth_image)
        dll.k4a_image_release(color_image)
def test_functional_fast_ctypes_transformation_depth_image_to_point_cloud(self):
    """Convert a depth image into an XYZ point cloud via raw ctypes.

    Uses one fixed depth mode; the exhaustive sweep lives in
    test_functional_ctypes_transformation_depth_image_to_point_cloud.
    """
    with self.lock:
        dll = k4a._bindings.k4a
        depth_mode = k4a.EDepthMode.NFOV_2X2BINNED
        calibration = dll._Calibration()

        # Grab a capture and pull its depth image.
        capture = test_config.get_capture(self.device_handle,
                                          k4a.EImageFormat.COLOR_BGRA32,
                                          k4a.EColorResolution.RES_1080P,
                                          depth_mode)
        depth_image = dll.k4a_capture_get_depth_image(capture)

        # Output image: depth-camera dimensions; stride is width * 6 for
        # three 16-bit coordinates per pixel.
        image_format = k4a.EImageFormat.CUSTOM
        width_pixels = dll.k4a_image_get_width_pixels(depth_image)
        height_pixels = dll.k4a_image_get_height_pixels(depth_image)
        stride_bytes = width_pixels * 6
        xyz_image = dll._ImageHandle()
        status = dll.k4a_image_create(
            ctypes.c_int(image_format),
            ctypes.c_int(width_pixels),
            ctypes.c_int(height_pixels),
            ctypes.c_int(stride_bytes),
            ctypes.byref(xyz_image)
        )
        self.assertTrue(k4a.K4A_SUCCEEDED(status))

        # Get a transformation.
        status = dll.k4a_device_get_calibration(
            self.device_handle, depth_mode, k4a.EColorResolution.RES_1080P,
            ctypes.byref(calibration))
        self.assertTrue(k4a.K4A_SUCCEEDED(status))
        transformation = dll.k4a_transformation_create(ctypes.byref(calibration))
        self.assertIsNotNone(transformation)  # Might not be a valid assert.

        # Apply the transformation.
        status = dll.k4a_transformation_depth_image_to_point_cloud(
            transformation,
            depth_image,
            k4a.ECalibrationType.DEPTH,
            xyz_image
        )
        self.assertTrue(k4a.K4A_SUCCEEDED(status))

        dll.k4a_transformation_destroy(transformation)
        dll.k4a_image_release(xyz_image)
        dll.k4a_image_release(depth_image)
def test_functional_ctypes_transformation_depth_image_to_color_camera(self):
    """Sweep depth mode / color resolution pairs through
    k4a_transformation_depth_image_to_color_camera."""
    with self.lock:
        depth_modes = [
            k4a.EDepthMode.NFOV_2X2BINNED,
            k4a.EDepthMode.NFOV_UNBINNED,
            k4a.EDepthMode.WFOV_2X2BINNED,
            k4a.EDepthMode.WFOV_UNBINNED,
        ]
        color_resolutions = [
            k4a.EColorResolution.RES_3072P,
            k4a.EColorResolution.RES_2160P,
            k4a.EColorResolution.RES_1536P,
            k4a.EColorResolution.RES_1440P,
            k4a.EColorResolution.RES_1080P,
            k4a.EColorResolution.RES_720P,
        ]
        calibration = k4a._bindings.k4a._Calibration()
        for depth_mode in depth_modes:
            for color_resolution in color_resolutions:
                with self.subTest(depth_mode = depth_mode,
                    color_resolution = color_resolution):
                    status = k4a._bindings.k4a.k4a_device_get_calibration(
                        self.device_handle,
                        depth_mode,
                        color_resolution,
                        ctypes.byref(calibration))
                    self.assertTrue(k4a.K4A_SUCCEEDED(status))
                    transformation = k4a._bindings.k4a.k4a_transformation_create(ctypes.byref(calibration))
                    self.assertIsNotNone(transformation)  # Might not be a valid assert.
                    # Get a depth image.
                    # NOTE(review): the capture handle is not released per
                    # iteration — presumably test_config or teardown owns it;
                    # confirm.
                    capture = test_config.get_capture(self.device_handle,
                        k4a.EImageFormat.COLOR_BGRA32,
                        color_resolution,
                        depth_mode)
                    depth_image = k4a._bindings.k4a.k4a_capture_get_depth_image(capture)
                    image_format = k4a._bindings.k4a.k4a_image_get_format(depth_image)
                    # Output image takes the color camera's dimensions;
                    # stride is width * 2 for 16-bit depth pixels.
                    color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
                    width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(color_image)
                    height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(color_image)
                    stride_bytes = width_pixels * 2
                    # Create an output depth image.
                    transformed_image = k4a._bindings.k4a._ImageHandle()
                    status = k4a._bindings.k4a.k4a_image_create(
                        image_format,
                        width_pixels,
                        height_pixels,
                        stride_bytes,
                        ctypes.byref(transformed_image)
                    )
                    self.assertTrue(k4a.K4A_SUCCEEDED(status))
                    # Apply the transformation.
                    status = k4a._bindings.k4a.k4a_transformation_depth_image_to_color_camera(
                        transformation,
                        depth_image,
                        transformed_image
                    )
                    self.assertTrue(k4a.K4A_SUCCEEDED(status))
                    k4a._bindings.k4a.k4a_transformation_destroy(transformation)
                    k4a._bindings.k4a.k4a_image_release(transformed_image)
                    k4a._bindings.k4a.k4a_image_release(depth_image)
                    # Bug fix: color_image was leaked on every loop iteration;
                    # sibling tests in this file release it.
                    k4a._bindings.k4a.k4a_image_release(color_image)
def test_functional_ctypes_transformation_depth_image_to_color_camera_custom(self):
    """Sweep depth mode / color resolution pairs through
    k4a_transformation_depth_image_to_color_camera_custom."""
    with self.lock:
        depth_modes = [
            k4a.EDepthMode.NFOV_2X2BINNED,
            k4a.EDepthMode.NFOV_UNBINNED,
            k4a.EDepthMode.WFOV_2X2BINNED,
            k4a.EDepthMode.WFOV_UNBINNED,
        ]
        color_resolutions = [
            k4a.EColorResolution.RES_3072P,
            k4a.EColorResolution.RES_2160P,
            k4a.EColorResolution.RES_1536P,
            k4a.EColorResolution.RES_1440P,
            k4a.EColorResolution.RES_1080P,
            k4a.EColorResolution.RES_720P,
        ]
        calibration = k4a._bindings.k4a._Calibration()
        for depth_mode in depth_modes:
            for color_resolution in color_resolutions:
                with self.subTest(depth_mode = depth_mode,
                    color_resolution = color_resolution):
                    status = k4a._bindings.k4a.k4a_device_get_calibration(
                        self.device_handle,
                        depth_mode,
                        color_resolution,
                        ctypes.byref(calibration))
                    self.assertTrue(k4a.K4A_SUCCEEDED(status))
                    transformation = k4a._bindings.k4a.k4a_transformation_create(ctypes.byref(calibration))
                    self.assertIsNotNone(transformation)  # Might not be a valid assert.
                    # Get a capture.
                    # NOTE(review): the capture handle is not released per
                    # iteration — presumably test_config or teardown owns it;
                    # confirm.
                    capture = test_config.get_capture(self.device_handle,
                        k4a.EImageFormat.COLOR_BGRA32,
                        color_resolution,
                        depth_mode)
                    # Output images take the color camera's dimensions
                    # (16-bit pixels).
                    color_image = k4a._bindings.k4a.k4a_capture_get_color_image(capture)
                    output_width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(color_image)
                    output_height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(color_image)
                    output_stride_bytes = output_width_pixels * 2
                    # Input images take the depth camera's dimensions.
                    depth_image = k4a._bindings.k4a.k4a_capture_get_depth_image(capture)
                    image_format = k4a._bindings.k4a.k4a_image_get_format(depth_image)
                    input_width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(depth_image)
                    input_height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(depth_image)
                    # Create an output depth image.
                    transformed_depth_image = k4a._bindings.k4a._ImageHandle()
                    status = k4a._bindings.k4a.k4a_image_create(
                        image_format,
                        output_width_pixels,
                        output_height_pixels,
                        output_stride_bytes,
                        ctypes.byref(transformed_depth_image)
                    )
                    self.assertTrue(k4a.K4A_SUCCEEDED(status))
                    # Create a custom input image (depth-camera sized,
                    # CUSTOM16 format).
                    image_format = k4a.EImageFormat.CUSTOM16
                    custom_image = k4a._bindings.k4a._ImageHandle()
                    status = k4a._bindings.k4a.k4a_image_create(
                        image_format.value,
                        input_width_pixels,
                        input_height_pixels,
                        input_width_pixels * 2,
                        ctypes.byref(custom_image))
                    self.assertEqual(k4a.EStatus.SUCCEEDED, status)
                    # Create a transformed custom output image
                    # (color-camera sized).
                    image_format = k4a.EImageFormat.CUSTOM16
                    transformed_custom_image = k4a._bindings.k4a._ImageHandle()
                    status = k4a._bindings.k4a.k4a_image_create(
                        image_format.value,
                        output_width_pixels,
                        output_height_pixels,
                        output_width_pixels * 2,
                        ctypes.byref(transformed_custom_image))
                    self.assertEqual(k4a.EStatus.SUCCEEDED, status)
                    # Apply the transformation.
                    status = k4a._bindings.k4a.k4a_transformation_depth_image_to_color_camera_custom(
                        transformation,
                        depth_image,
                        custom_image,
                        transformed_depth_image,
                        transformed_custom_image,
                        k4a.ETransformInterpolationType.LINEAR,
                        0
                    )
                    self.assertTrue(k4a.K4A_SUCCEEDED(status))
                    k4a._bindings.k4a.k4a_transformation_destroy(transformation)
                    k4a._bindings.k4a.k4a_image_release(depth_image)
                    # Bug fix: color_image was leaked on every loop iteration;
                    # sibling tests in this file release it.
                    k4a._bindings.k4a.k4a_image_release(color_image)
                    k4a._bindings.k4a.k4a_image_release(custom_image)
                    k4a._bindings.k4a.k4a_image_release(transformed_depth_image)
                    k4a._bindings.k4a.k4a_image_release(transformed_custom_image)
def test_functional_ctypes_transformation_color_image_to_depth_camera(self):
    """Sweep depth mode / color resolution pairs through
    k4a_transformation_color_image_to_depth_camera."""
    with self.lock:
        dll = k4a._bindings.k4a
        depth_modes = [
            k4a.EDepthMode.NFOV_2X2BINNED,
            k4a.EDepthMode.NFOV_UNBINNED,
            k4a.EDepthMode.WFOV_2X2BINNED,
            k4a.EDepthMode.WFOV_UNBINNED,
        ]
        color_resolutions = [
            k4a.EColorResolution.RES_3072P,
            k4a.EColorResolution.RES_2160P,
            k4a.EColorResolution.RES_1536P,
            k4a.EColorResolution.RES_1440P,
            k4a.EColorResolution.RES_1080P,
            k4a.EColorResolution.RES_720P,
        ]
        calibration = dll._Calibration()

        for depth_mode in depth_modes:
            for color_resolution in color_resolutions:
                with self.subTest(depth_mode=depth_mode,
                                  color_resolution=color_resolution):
                    status = dll.k4a_device_get_calibration(
                        self.device_handle, depth_mode, color_resolution,
                        ctypes.byref(calibration))
                    self.assertTrue(k4a.K4A_SUCCEEDED(status))
                    transformation = dll.k4a_transformation_create(
                        ctypes.byref(calibration))
                    self.assertIsNotNone(transformation)  # Might not be a valid assert.
                    # Grab a capture with both depth and color images.
                    capture = test_config.get_capture(self.device_handle,
                                                      k4a.EImageFormat.COLOR_BGRA32,
                                                      color_resolution,
                                                      depth_mode)
                    depth_image = dll.k4a_capture_get_depth_image(capture)
                    color_image = dll.k4a_capture_get_color_image(capture)
                    # Output image: depth-camera dimensions, color format;
                    # stride is width * 4 for 32-bit BGRA pixels.
                    image_format = dll.k4a_image_get_format(color_image)
                    width_pixels = dll.k4a_image_get_width_pixels(depth_image)
                    height_pixels = dll.k4a_image_get_height_pixels(depth_image)
                    stride_bytes = width_pixels * 4
                    transformed_image = dll._ImageHandle()
                    status = dll.k4a_image_create(
                        image_format,
                        width_pixels,
                        height_pixels,
                        stride_bytes,
                        ctypes.byref(transformed_image)
                    )
                    self.assertTrue(k4a.K4A_SUCCEEDED(status))
                    # Apply the transformation.
                    status = dll.k4a_transformation_color_image_to_depth_camera(
                        transformation,
                        depth_image,
                        color_image,
                        transformed_image
                    )
                    self.assertTrue(k4a.K4A_SUCCEEDED(status))
                    dll.k4a_transformation_destroy(transformation)
                    dll.k4a_image_release(transformed_image)
                    dll.k4a_image_release(depth_image)
                    dll.k4a_image_release(color_image)
def test_functional_ctypes_transformation_depth_image_to_point_cloud(self):
    """Transform a depth image into a 3D point cloud for every depth mode.

    For each depth mode: grab a capture, create a CUSTOM-format output
    image sized for int16 XYZ triplets (6 bytes per pixel), obtain a
    transformation from the device calibration, and run
    k4a_transformation_depth_image_to_point_cloud(). Every handle is
    released afterwards so repeated sub-tests do not leak resources.
    """
    with self.lock:
        depth_modes = [
            k4a.EDepthMode.NFOV_2X2BINNED,
            k4a.EDepthMode.NFOV_UNBINNED,
            k4a.EDepthMode.WFOV_2X2BINNED,
            k4a.EDepthMode.WFOV_UNBINNED,
        ]
        for depth_mode in depth_modes:
            with self.subTest(depth_mode=depth_mode):
                calibration = k4a._bindings.k4a._Calibration()

                # Get a capture and its depth image.
                capture = test_config.get_capture(
                    self.device_handle,
                    k4a.EImageFormat.COLOR_BGRA32,
                    k4a.EColorResolution.RES_1080P,
                    depth_mode)
                depth_image = k4a._bindings.k4a.k4a_capture_get_depth_image(capture)

                # Create the output image: 3 x int16 (X, Y, Z) per pixel.
                image_format = k4a.EImageFormat.CUSTOM
                width_pixels = k4a._bindings.k4a.k4a_image_get_width_pixels(depth_image)
                height_pixels = k4a._bindings.k4a.k4a_image_get_height_pixels(depth_image)
                stride_bytes = width_pixels * 6  # 3 components * sizeof(int16)
                xyz_image = k4a._bindings.k4a._ImageHandle()
                status = k4a._bindings.k4a.k4a_image_create(
                    ctypes.c_int(image_format),
                    ctypes.c_int(width_pixels),
                    ctypes.c_int(height_pixels),
                    ctypes.c_int(stride_bytes),
                    ctypes.byref(xyz_image)
                )
                self.assertTrue(k4a.K4A_SUCCEEDED(status))

                # Get a transformation from the device calibration.
                status = k4a._bindings.k4a.k4a_device_get_calibration(
                    self.device_handle,
                    depth_mode,
                    k4a.EColorResolution.RES_1080P,
                    ctypes.byref(calibration))
                self.assertTrue(k4a.K4A_SUCCEEDED(status))
                transformation = k4a._bindings.k4a.k4a_transformation_create(
                    ctypes.byref(calibration))
                self.assertIsNotNone(transformation)  # Might not be a valid assert.

                # Apply the transformation.
                status = k4a._bindings.k4a.k4a_transformation_depth_image_to_point_cloud(
                    transformation,
                    depth_image,
                    k4a.ECalibrationType.DEPTH,
                    xyz_image
                )
                self.assertTrue(k4a.K4A_SUCCEEDED(status))

                # Release all native handles. The original version leaked
                # the capture handle (k4a_capture_release was never called),
                # which accumulates one unreleased capture per sub-test.
                k4a._bindings.k4a.k4a_transformation_destroy(transformation)
                k4a._bindings.k4a.k4a_image_release(xyz_image)
                k4a._bindings.k4a.k4a_image_release(depth_image)
                k4a._bindings.k4a.k4a_capture_release(capture)
# Allow running this test module directly (e.g. `python this_file.py`).
if __name__ == "__main__":
    unittest.main()
| 46.19894
| 124
| 0.625009
| 10,375
| 95,909
| 5.385831
| 0.034795
| 0.042199
| 0.098214
| 0.098876
| 0.937507
| 0.921006
| 0.901303
| 0.883031
| 0.860911
| 0.83983
| 0
| 0.03988
| 0.308991
| 95,909
| 2,076
| 125
| 46.19894
| 0.803256
| 0.055261
| 0
| 0.756254
| 0
| 0
| 0.000088
| 0
| 0
| 0
| 0
| 0
| 0.142399
| 1
| 0.044259
| false
| 0.00449
| 0.003207
| 0.000641
| 0.050032
| 0.000641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.